mirror of https://github.com/edgelesssys/constellation.git
synced 2025-06-12 08:23:01 -04:00

Remove access manager (#470)

* remove access manager from code base
* document new node ssh workflow
* keep config backwards compatible
* slow down link checking to prevent http 429

Signed-off-by: Fabian Kammel <fk@edgeless.systems>

This commit is contained in: parent b0f4a09ebe, commit b92b3772ca

59 changed files with 251 additions and 2831 deletions
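The commit message refers to a newly documented, Kubernetes-native node SSH workflow, but that documentation is not part of this page. As a rough, non-authoritative sketch of such a workflow (the node name is a placeholder; this is not necessarily what the Constellation docs describe), a debug pod can stand in for the removed access-manager when interactive node access is needed:

# Create a debug pod on the node; the node's root filesystem is mounted at /host.
kubectl debug node/worker-0 -it --image=busybox
# Inside the debug container, switch into the host filesystem to administer the node.
chroot /host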
1  .github/docs/layout.md  vendored

@@ -2,7 +2,6 @@
 Core components:
 
-* [access_manager](/access_manager): Contains the access-manager pod used to persist SSH users based on a K8s ConfigMap
 * [cli](/cli): The CLI is used to manage a Constellation cluster
 * [bootstrapper](/bootstrapper): The bootstrapper is a node agent whose most important task is to bootstrap a node
 * [image](/image): Build files for the Constellation disk image
 
1  .github/docs/release.md  vendored

@@ -33,7 +33,6 @@ This checklist will prepare `v1.3.0` from `v1.2.0`. Adjust your version numbers
 ```
 
 ```sh
-gh workflow run build-micro-service-manual.yml --ref release/v$minor -F microService=access-manager -F imageTag=v$ver -F version=$ver --repo edgelesssys/constellation
 gh workflow run build-micro-service-manual.yml --ref release/v$minor -F microService=join-service -F imageTag=v$ver -F version=$ver --repo edgelesssys/constellation
 gh workflow run build-micro-service-manual.yml --ref release/v$minor -F microService=kmsserver -F imageTag=v$ver -F version=$ver --repo edgelesssys/constellation
 gh workflow run build-micro-service-manual.yml --ref release/v$minor -F microService=verification-service -F imageTag=v$ver -F version=$ver --repo edgelesssys/constellation
42  .github/workflows/build-access-manager-image.yml  vendored

@@ -1,42 +0,0 @@
-name: Build and upload access-manager image
-env:
-  REGISTRY: ghcr.io
-  IMAGE_NAME: access-manager
-
-on:
-  workflow_dispatch:
-  push:
-    branches:
-      - main
-      - "release/**"
-    paths:
-      - "access_manager/**"
-      - "internal/deploy/**"
-
-jobs:
-  build-access-manager:
-    runs-on: ubuntu-22.04
-    permissions:
-      contents: read
-      packages: write
-    steps:
-      - name: Check out repository
-        id: checkout
-        uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # tag=v3.1.0
-
-      - name: Setup Go environment
-        uses: actions/setup-go@c4a742cab115ed795e34d4513e2cf7d472deb55f # tag=v3.3.1
-        with:
-          go-version: "1.19.3"
-
-      - name: Build and upload access-manager container image
-        id: build-and-upload
-        uses: ./.github/actions/build_micro_service
-        with:
-          name: access-manager
-          projectVersion: "0.0.0"
-          dockerfile: access_manager/Dockerfile
-          githubToken: ${{ secrets.GITHUB_TOKEN }}
-          cosignPublicKey: ${{ startsWith(github.ref, 'refs/heads/release/v') && secrets.COSIGN_PUBLIC_KEY || secrets.COSIGN_DEV_PUBLIC_KEY }}
-          cosignPrivateKey: ${{ startsWith(github.ref, 'refs/heads/release/v') && secrets.COSIGN_PRIVATE_KEY || secrets.COSIGN_DEV_PRIVATE_KEY }}
-          cosignPassword: ${{ startsWith(github.ref, 'refs/heads/release/v') && secrets.COSIGN_PASSWORD || secrets.COSIGN_DEV_PASSWORD }}
.github/workflows/build-micro-service-manual.yml

@@ -7,12 +7,11 @@ on:
         description: "Name of the micro-service image to build"
         type: choice
         options:
-          - "access-manager"
           - "join-service"
           - "kmsserver"
           - "verification-service"
         required: true
-        default: "access-manager"
+        default: "join-service"
       imageTag:
         description: "Container image tag"
         required: true
@@ -43,8 +42,6 @@ jobs:
         id: set-variable
         run: |
           case "${{ inputs.microService }}" in
-            "access-manager" )
-              echo "microServiceDockerfile=access_manager/Dockerfile" >> $GITHUB_ENV ;;
             "join-service" )
               echo "microServiceDockerfile=joinservice/Dockerfile" >> $GITHUB_ENV ;;
             "kmsserver" )
1  .github/workflows/check-links.yml  vendored

@@ -26,6 +26,7 @@ jobs:
       - name: Link Checker
         uses: lycheeverse/lychee-action@4dcb8bee2a0a4531cba1a1f392c54e8375d6dd81 # v1.5.4
         with:
+          args: "--verbose --no-progress --max-concurrency 5 './**/*.md' './**/*.html'"
           fail: true
         env:
           GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}
CHANGELOG.md

@@ -30,6 +30,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ### Removed
 <!-- For now removed features. -->
+
+- `access-manager` was removed from code base. K8s native way to SSH into nodes documented.
 
 ## [2.2.0] - 2022-11-08
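For reference, the removed access-manager wrote keys to `.ssh/authorized_keys.d/constellation-ssh-keys` under each home directory in `/var/home` (see the deleted access_manager.go further down). A hedged sketch of placing a key there by hand from a root shell on the node; the username and key are placeholders, and it assumes the node's sshd still picks up key fragments from that directory:

# Placeholder user and key; run from a root shell on the node (e.g. via the debug-pod sketch above).
mkdir -p /var/home/myuser/.ssh/authorized_keys.d
echo 'ssh-ed25519 AAAAC3... myuser' > /var/home/myuser/.ssh/authorized_keys.d/constellation-ssh-keys
chown -R myuser:myuser /var/home/myuser/.ssh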
access_manager/Dockerfile

@@ -1,31 +0,0 @@
-FROM fedora:36@sha256:455fec9590de794fbc21f61dbc7e90bf9918b58492d2a03fa269c09db47b43f6 as build
-
-RUN dnf -y update && \
-    dnf -y install @development-tools pkg-config iproute iputils wget git jq openssl-devel cryptsetup-libs cryptsetup-devel && \
-    dnf clean all
-
-# Install Go
-ARG GO_VER=1.19.3
-RUN wget -q https://go.dev/dl/go${GO_VER}.linux-amd64.tar.gz && \
-    tar -C /usr/local -xzf go${GO_VER}.linux-amd64.tar.gz && \
-    rm go${GO_VER}.linux-amd64.tar.gz
-ENV PATH ${PATH}:/usr/local/go/bin
-
-# Download go dependencies
-WORKDIR /constellation/
-COPY go.mod ./
-COPY go.sum ./
-RUN go mod download all
-
-# Copy Repo
-COPY . /constellation
-RUN rm -rf ./hack/
-
-# Build the access_manager
-WORKDIR /constellation/access_manager/
-RUN --mount=type=cache,target=/root/.cache/go-build CGO_ENABLED=0 go build -o /constellation/build/access_manager -ldflags "-s -w" .
-
-# Copy the access_manager from build into a scratch container, which is eventually deployed into the cluster
-FROM scratch as release
-COPY --from=build /constellation/build/access_manager /access_manager
-ENTRYPOINT [ "/access_manager" ]
access_manager/access_manager.go

@@ -1,335 +0,0 @@
-/*
-Copyright (c) Edgeless Systems GmbH
-
-SPDX-License-Identifier: AGPL-3.0-only
-*/
-
-package main
-
-import (
-    "context"
-    "errors"
-    "flag"
-    "fmt"
-    "os"
-    "path"
-    "path/filepath"
-    "syscall"
-    "time"
-
-    "github.com/edgelesssys/constellation/v2/internal/deploy/ssh"
-    "github.com/edgelesssys/constellation/v2/internal/deploy/user"
-    "github.com/edgelesssys/constellation/v2/internal/logger"
-    "github.com/spf13/afero"
-    "go.uber.org/zap"
-
-    v1 "k8s.io/api/core/v1"
-    v1Options "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/client-go/kubernetes"
-    "k8s.io/client-go/rest"
-)
-
-const (
-    // hostPath holds the path to the host's root file system we chroot into.
-    hostPath = "/host"
-
-    // normalHomePath holds the general home directory of a system.
-    normalHomePath = "/var/home"
-
-    // evictedHomePath holds the directory to which deleted user directories are moved to.
-    evictedHomePath = "/var/evicted"
-
-    // relativePathToSSHKeys holds the path inside a user's directory to the SSH keys.
-    // Needs to be in sync with internal/deploy/ssh.go.
-    relativePathToSSHKeys = ".ssh/authorized_keys.d/constellation-ssh-keys"
-
-    // timeout is the maximum time to wait for communication with the Kubernetes API server.
-    timeout = 60 * time.Second
-)
-
-// uidGidPair holds the user owner and group owner of a directory.
-type uidGIDPair struct {
-    UID uint32
-    GID uint32
-}
-
-func main() {
-    verbosity := flag.Int("v", 0, logger.CmdLineVerbosityDescription)
-
-    flag.Parse()
-    log := logger.New(logger.JSONLog, logger.VerbosityFromInt(*verbosity))
-
-    hostname, err := os.Hostname()
-    if err != nil {
-        log.Warnf("Starting constellation-access-manager as unknown pod")
-    } else {
-        log.Infof("Starting constellation-access-manager as %q", hostname)
-    }
-
-    // Retrieve configMap from Kubernetes API before we chroot into the host filesystem.
-    configMap, err := retrieveConfigMap(log)
-    if err != nil {
-        log.With(zap.Error(err)).Fatalf("Failed to retrieve ConfigMap from Kubernetes API")
-    }
-
-    // Chroot into main system
-    if err := syscall.Chroot(hostPath); err != nil {
-        log.With(zap.Error(err)).Fatalf("Failed to chroot into host filesystem")
-    }
-    if err := syscall.Chdir("/"); err != nil {
-        log.With(zap.Error(err)).Fatalf("Failed to chdir into host filesystem")
-    }
-
-    fs := afero.NewOsFs()
-    linuxUserManager := user.NewLinuxUserManager(fs)
-
-    if err := run(log, fs, linuxUserManager, configMap); err != nil {
-        // So far there is only one error path in this code, and this is getting the user directories... So just make the error specific here for now.
-        log.With(zap.Error(err)).Fatalf("Failed to retrieve existing user directories")
-    }
-}
-
-// loadClientSet loads the Kubernetes API client.
-func loadClientSet() (*kubernetes.Clientset, error) {
-    // creates the in-cluster config
-    config, err := rest.InClusterConfig()
-    if err != nil {
-        return nil, err
-    }
-    // creates the clientset
-    clientset, err := kubernetes.NewForConfig(config)
-    if err != nil {
-        return nil, err
-    }
-
-    return clientset, nil
-}
-
-// deployKeys creates or evicts users based on the ConfigMap and deploy their SSH keys.
-func deployKeys(
-    ctx context.Context, log *logger.Logger, configMap *v1.ConfigMap, fs afero.Fs,
-    linuxUserManager user.LinuxUserManager, userMap map[string]uidGIDPair, sshAccess *ssh.Access,
-) {
-    // If no ConfigMap exists or has been emptied, evict all users and exit.
-    if configMap == nil || len(configMap.Data) == 0 {
-        for username, ownership := range userMap {
-            log := log.With(zap.String("username", username))
-            if username != "root" {
-                evictedUserPath := path.Join(evictedHomePath, username)
-                log.With(zap.Uint32("UID", ownership.UID), zap.Uint32("GID", ownership.GID)).
-                    Infof("Evicting user to %q", evictedUserPath)
-
-                if err := evictUser(username, fs, linuxUserManager); err != nil {
-                    log.With(zap.Error(err)).Errorf("Did not evict user")
-                    continue
-                }
-            } else {
-                log.Infof("Removing any old keys for 'root', if existent")
-                // Remove root's SSH key specifically instead of evicting the whole directory.
-                if err := evictRootKey(fs, linuxUserManager); err != nil && !os.IsNotExist(err) {
-                    log.With(zap.Error(err)).Errorf("Failed to remove previously existing root key")
-                    continue
-                }
-            }
-        }
-
-        return
-    }
-
-    // First, recreate users that already existed, if they are defined in the configMap.
-    // For users which do not exist, we move their user directories to avoid accidental takeovers but also loss of data.
-    for username, ownership := range userMap {
-        log := log.With(zap.String("username", username))
-        if username != "root" {
-            if _, ok := configMap.Data[username]; ok {
-                log.With(zap.Uint32("UID", ownership.UID), zap.Uint32("GID", ownership.GID)).
-                    Infof("Recreating user, if not existent")
-
-                if err := linuxUserManager.Creator.CreateUserWithSpecificUIDAndGID(
-                    ctx, username, int(ownership.UID), int(ownership.GID),
-                ); err != nil {
-                    if errors.Is(err, user.ErrUserOrGroupAlreadyExists) {
-                        log.Infof("User already exists, skipping")
-                    } else {
-                        log.With(zap.Error(err)).Errorf("Failed to recreate user")
-                    }
-                    continue
-                }
-            } else {
-                evictedUserPath := path.Join(evictedHomePath, username)
-                log.With(zap.Uint32("UID", ownership.UID), zap.Uint32("GID", ownership.GID)).
-                    Infof("Evicting user to %q", evictedUserPath)
-                if err := evictUser(username, fs, linuxUserManager); err != nil {
-                    log.With(zap.Error(err)).Errorf("Did not evict user")
-                    continue
-                }
-            }
-        } else {
-            log.Infof("Removing any old keys for 'root', if existent")
-            // Always remove the root key first, even if it is about to be redeployed.
-            if err := evictRootKey(fs, linuxUserManager); err != nil && !os.IsNotExist(err) {
-                log.With(zap.Error(err)).Errorf("Failed to remove previously existing root key")
-                continue
-            }
-        }
-    }
-
-    // Then, create the remaining users from the configMap (if remaining) and deploy SSH keys for all users.
-    for username, publicKey := range configMap.Data {
-        log := log.With(zap.String("username", username))
-        if _, ok := userMap[username]; !ok {
-            log.Infof("Creating user")
-            if err := linuxUserManager.Creator.CreateUser(ctx, username); err != nil {
-                if errors.Is(err, user.ErrUserOrGroupAlreadyExists) {
-                    log.Infof("User already exists, skipping")
-                } else {
-                    log.With(zap.Error(err)).Errorf("Failed to create user")
-                }
-                continue
-            }
-        }
-
-        // If we created a user, let's actually get the home directory instead of assuming it's the same as the normal home directory.
-        user, err := linuxUserManager.GetLinuxUser(username)
-        if err != nil {
-            log.With(zap.Error(err)).Errorf("Failed to retrieve information about user")
-            continue
-        }
-
-        // Delete already deployed keys
-        pathToSSHKeys := filepath.Join(user.Home, relativePathToSSHKeys)
-        if err := fs.Remove(pathToSSHKeys); err != nil && !os.IsNotExist(err) {
-            log.With(zap.Error(err)).Errorf("Failed to delete remaining managed SSH keys for user")
-            continue
-        }
-
-        // And (re)deploy the keys from the ConfigMap
-        newKey := ssh.UserKey{
-            Username:  username,
-            PublicKey: publicKey,
-        }
-
-        log.Infof("Deploying new SSH key for user")
-        if err := sshAccess.DeployAuthorizedKey(context.Background(), newKey); err != nil {
-            log.With(zap.Error(err)).Errorf("Failed to deploy SSH keys for user")
-            continue
-        }
-    }
-}
-
-// evictUser moves a user directory to evictedPath and changes their owner recursive to root.
-func evictUser(username string, fs afero.Fs, linuxUserManager user.LinuxUserManager) error {
-    if _, err := linuxUserManager.GetLinuxUser(username); err == nil {
-        return fmt.Errorf("user '%s' still seems to exist", username)
-    }
-
-    // First, ensure evictedPath already exists.
-    if err := fs.MkdirAll(evictedHomePath, 0o700); err != nil {
-        return err
-    }
-
-    // Build paths to the user's home directory and evicted home directory, which includes a timestamp to avoid collisions.
-    oldUserDir := path.Join(normalHomePath, username)
-    evictedUserDir := path.Join(evictedHomePath, fmt.Sprintf("%s_%d", username, time.Now().Unix()))
-
-    // Move old, not recreated user directory to evictedPath.
-    if err := fs.Rename(oldUserDir, evictedUserDir); err != nil {
-        return err
-    }
-
-    // Chown the user directory and all files inside to root, but do not change permissions to allow recovery without messed up permissions.
-    if err := fs.Chown(evictedUserDir, 0, 0); err != nil {
-        return err
-    }
-    if err := afero.Walk(fs, evictedUserDir, func(name string, info os.FileInfo, err error) error {
-        if err == nil {
-            err = fs.Chown(name, 0, 0)
-        }
-
-        return err
-    }); err != nil {
-        return err
-    }
-
-    return nil
-}
-
-// evictRootKey removes the root key from the filesystem, instead of evicting the whole user directory.
-func evictRootKey(fs afero.Fs, linuxUserManager user.LinuxUserManager) error {
-    user, err := linuxUserManager.GetLinuxUser("root")
-    if err != nil {
-        return err
-    }
-
-    // Delete already deployed keys
-    pathToSSHKeys := filepath.Join(user.Home, relativePathToSSHKeys)
-    if err := fs.Remove(pathToSSHKeys); err != nil && !os.IsNotExist(err) {
-        return err
-    }
-
-    return nil
-}
-
-// retrieveConfigMap contacts the Kubernetes API server and retrieves the ssh-users ConfigMap.
-func retrieveConfigMap(log *logger.Logger) (*v1.ConfigMap, error) {
-    // Authenticate with the Kubernetes API and get the information from the ssh-users ConfigMap to recreate the users we need.
-    log.Infof("Authenticating with Kubernetes...")
-    clientset, err := loadClientSet()
-    if err != nil {
-        return nil, err
-    }
-
-    ctx, cancel := context.WithTimeout(context.Background(), timeout)
-    defer cancel()
-
-    log.Infof("Requesting 'ssh-users' ConfigMap...")
-    configmap, err := clientset.CoreV1().ConfigMaps("kube-system").Get(ctx, "ssh-users", v1Options.GetOptions{})
-    if err != nil {
-        return nil, err
-    }
-
-    return configmap, err
-}
-
-// generateUserMap iterates the list of existing home directories to create a map of previously existing usernames to their previous respective UID and GID.
-func generateUserMap(log *logger.Logger, fs afero.Fs) (map[string]uidGIDPair, error) {
-    // Go through the normalHomePath directory, and create a mapping of existing user names in combination with their owner's UID & GID.
-    // We use this information later to create missing users under the same UID and GID to avoid breakage.
-    fileInfo, err := afero.ReadDir(fs, normalHomePath)
-    if err != nil {
-        return nil, err
-    }
-
-    userMap := make(map[string]uidGIDPair)
-    userMap["root"] = uidGIDPair{UID: 0, GID: 0}
-    // This will fail under MemMapFS, since it's not UNIX-compatible.
-    for _, singleInfo := range fileInfo {
-        log := log.With("username", singleInfo.Name())
-        // Fail gracefully instead of hard.
-        if stat, ok := singleInfo.Sys().(*syscall.Stat_t); ok {
-            userMap[singleInfo.Name()] = uidGIDPair{UID: stat.Uid, GID: stat.Gid}
-            log.With(zap.Uint32("UID", stat.Uid), zap.Uint32("GID", stat.Gid)).
-                Infof("Found home directory for user")
-        } else {
-            log.Warnf("Failed to retrieve UNIX stat for user. User will not be evicted, or if this directory belongs to a user that is to be created later, it might be created under a different UID/GID than before")
-            continue
-        }
-    }
-
-    return userMap, nil
-}
-
-func run(log *logger.Logger, fs afero.Fs, linuxUserManager user.LinuxUserManager, configMap *v1.ConfigMap) error {
-    sshAccess := ssh.NewAccess(log, linuxUserManager)
-
-    // Generate userMap containing existing user directories and their ownership
-    userMap, err := generateUserMap(log, fs)
-    if err != nil {
-        return err
-    }
-
-    // Try to deploy keys based on configmap.
-    deployKeys(context.Background(), log, configMap, fs, linuxUserManager, userMap, sshAccess)
-
-    return nil
-}
@ -1,331 +0,0 @@
|
||||||
/*
|
|
||||||
Copyright (c) Edgeless Systems GmbH
|
|
||||||
|
|
||||||
SPDX-License-Identifier: AGPL-3.0-only
|
|
||||||
*/
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"os"
|
|
||||||
"path"
|
|
||||||
"path/filepath"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/edgelesssys/constellation/v2/internal/deploy/ssh"
|
|
||||||
"github.com/edgelesssys/constellation/v2/internal/deploy/user"
|
|
||||||
"github.com/edgelesssys/constellation/v2/internal/logger"
|
|
||||||
"github.com/spf13/afero"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
"go.uber.org/goleak"
|
|
||||||
v1 "k8s.io/api/core/v1"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestMain(m *testing.M) {
|
|
||||||
goleak.VerifyTestMain(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestEvictUser(t *testing.T) {
|
|
||||||
require := require.New(t)
|
|
||||||
assert := assert.New(t)
|
|
||||||
|
|
||||||
fs := afero.NewMemMapFs()
|
|
||||||
linuxUserManager := user.NewLinuxUserManagerFake(fs)
|
|
||||||
|
|
||||||
// Create fake user directory
|
|
||||||
homePath := path.Join(normalHomePath, "myuser")
|
|
||||||
err := fs.MkdirAll(homePath, 0o700)
|
|
||||||
require.NoError(err)
|
|
||||||
|
|
||||||
// Try to evict the user
|
|
||||||
assert.NoError(evictUser("myuser", fs, linuxUserManager))
|
|
||||||
|
|
||||||
// Check if user has been evicted
|
|
||||||
homeEntries, err := afero.ReadDir(fs, normalHomePath)
|
|
||||||
require.NoError(err)
|
|
||||||
evictedEntries, err := afero.ReadDir(fs, evictedHomePath)
|
|
||||||
require.NoError(err)
|
|
||||||
assert.Len(homeEntries, 0)
|
|
||||||
assert.Len(evictedEntries, 1)
|
|
||||||
for _, singleEntry := range evictedEntries {
|
|
||||||
assert.Contains(singleEntry.Name(), "myuser")
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
Note: Unfourtunaly, due to a bug in afero, we cannot test that the files inside the directory have actually been moved.
|
|
||||||
This works on the real filesystem, but not on the memory filesystem.
|
|
||||||
See: https://github.com/spf13/afero/issues/141 (known since 2017, guess it will never get fixed ¯\_(ツ)_/¯)
|
|
||||||
This limits the scope of this test, obviously... But I think as long as we can move the directory,
|
|
||||||
the functionality on the real filesystem should be there (unless it throws an error).
|
|
||||||
*/
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDeployKeys(t *testing.T) {
|
|
||||||
require := require.New(t)
|
|
||||||
assert := assert.New(t)
|
|
||||||
|
|
||||||
testCases := map[string]struct {
|
|
||||||
configMap *v1.ConfigMap
|
|
||||||
existingUsers map[string]uidGIDPair
|
|
||||||
}{
|
|
||||||
"undefined": {},
|
|
||||||
"undefined map, empty users": {existingUsers: map[string]uidGIDPair{}},
|
|
||||||
"empty map, undefined users": {configMap: &v1.ConfigMap{}},
|
|
||||||
"both empty": {
|
|
||||||
configMap: &v1.ConfigMap{
|
|
||||||
Data: map[string]string{},
|
|
||||||
},
|
|
||||||
existingUsers: map[string]uidGIDPair{},
|
|
||||||
},
|
|
||||||
"create two users, no existing users": {
|
|
||||||
configMap: &v1.ConfigMap{
|
|
||||||
Data: map[string]string{
|
|
||||||
"user1": "ssh-rsa abcdefgh",
|
|
||||||
"user2": "ssh-ed25519 defghijklm",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
existingUsers: map[string]uidGIDPair{},
|
|
||||||
},
|
|
||||||
"empty configMap, user1 and user2 should be evicted": {
|
|
||||||
configMap: &v1.ConfigMap{
|
|
||||||
Data: map[string]string{},
|
|
||||||
},
|
|
||||||
existingUsers: map[string]uidGIDPair{
|
|
||||||
"user1": {
|
|
||||||
UID: 1000,
|
|
||||||
GID: 1000,
|
|
||||||
},
|
|
||||||
"user2": {
|
|
||||||
UID: 1001,
|
|
||||||
GID: 1001,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
"configMap contains user2, user1 should be evicted, user2 recreated": {
|
|
||||||
configMap: &v1.ConfigMap{
|
|
||||||
Data: map[string]string{
|
|
||||||
"user2": "ssh-rsa abcdefg",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
existingUsers: map[string]uidGIDPair{
|
|
||||||
"user1": {
|
|
||||||
UID: 1000,
|
|
||||||
GID: 1000,
|
|
||||||
},
|
|
||||||
"user2": {
|
|
||||||
UID: 1001,
|
|
||||||
GID: 1001,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
"configMap contains user1 and user3, user1 should be recreated, user2 evicted, user3 created": {
|
|
||||||
configMap: &v1.ConfigMap{
|
|
||||||
Data: map[string]string{
|
|
||||||
"user1": "ssh-rsa abcdefg",
|
|
||||||
"user3": "ssh-ed25519 defghijklm",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
existingUsers: map[string]uidGIDPair{
|
|
||||||
"user1": {
|
|
||||||
UID: 1000,
|
|
||||||
GID: 1000,
|
|
||||||
},
|
|
||||||
"user2": {
|
|
||||||
UID: 1001,
|
|
||||||
GID: 1001,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
"configMap contains user1 and user3, both should be recreated": {
|
|
||||||
configMap: &v1.ConfigMap{
|
|
||||||
Data: map[string]string{
|
|
||||||
"user1": "ssh-rsa abcdefg",
|
|
||||||
"user3": "ssh-ed25519 defghijklm",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
existingUsers: map[string]uidGIDPair{
|
|
||||||
"user1": {
|
|
||||||
UID: 1000,
|
|
||||||
GID: 1000,
|
|
||||||
},
|
|
||||||
"user3": {
|
|
||||||
UID: 1002,
|
|
||||||
GID: 1002,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
"configMap contains user2, user1 and user3 should be evicted, user2 should be created": {
|
|
||||||
configMap: &v1.ConfigMap{
|
|
||||||
Data: map[string]string{
|
|
||||||
"user2": "ssh-ed25519 defghijklm",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
existingUsers: map[string]uidGIDPair{
|
|
||||||
"user1": {
|
|
||||||
UID: 1000,
|
|
||||||
GID: 1000,
|
|
||||||
},
|
|
||||||
"user3": {
|
|
||||||
UID: 1002,
|
|
||||||
GID: 1002,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
for name, tc := range testCases {
|
|
||||||
t.Run(name, func(t *testing.T) {
|
|
||||||
fs := afero.NewMemMapFs()
|
|
||||||
require.NoError(fs.MkdirAll(normalHomePath, 0o700))
|
|
||||||
require.NoError(fs.Mkdir("/etc", 0o644))
|
|
||||||
_, err := fs.Create("/etc/passwd")
|
|
||||||
require.NoError(err)
|
|
||||||
|
|
||||||
// Create fake user directories
|
|
||||||
for user := range tc.existingUsers {
|
|
||||||
userHomePath := path.Join(normalHomePath, user)
|
|
||||||
err := fs.MkdirAll(userHomePath, 0o700)
|
|
||||||
require.NoError(err)
|
|
||||||
require.NoError(fs.Chown(userHomePath, int(tc.existingUsers[user].UID), int(tc.existingUsers[user].GID)))
|
|
||||||
}
|
|
||||||
|
|
||||||
log := logger.NewTest(t)
|
|
||||||
linuxUserManager := user.NewLinuxUserManagerFake(fs)
|
|
||||||
sshAccess := ssh.NewAccess(log, linuxUserManager)
|
|
||||||
deployKeys(context.Background(), log, tc.configMap, fs, linuxUserManager, tc.existingUsers, sshAccess)
|
|
||||||
|
|
||||||
// Unfortunately, we cannot retrieve the UID/GID from afero's MemMapFs without weird hacks,
|
|
||||||
// as it does not have getters and it is not exported.
|
|
||||||
if tc.configMap != nil && tc.existingUsers != nil {
|
|
||||||
// Parse /etc/passwd and check for users
|
|
||||||
passwdEntries, err := linuxUserManager.Passwd.Parse(fs)
|
|
||||||
require.NoError(err)
|
|
||||||
|
|
||||||
// Check recreation or deletion
|
|
||||||
for user := range tc.existingUsers {
|
|
||||||
if _, ok := tc.configMap.Data[user]; ok {
|
|
||||||
checkHomeDirectory(user, fs, assert, true)
|
|
||||||
|
|
||||||
// Check if user exists in /etc/passwd
|
|
||||||
userEntry, ok := passwdEntries[user]
|
|
||||||
assert.True(ok)
|
|
||||||
|
|
||||||
// Check if user has been recreated with correct UID/GID
|
|
||||||
actualUID, err := strconv.Atoi(userEntry.UID)
|
|
||||||
assert.NoError(err)
|
|
||||||
assert.EqualValues(tc.existingUsers[user].UID, actualUID)
|
|
||||||
actualGID, err := strconv.Atoi(userEntry.GID)
|
|
||||||
assert.NoError(err)
|
|
||||||
assert.EqualValues(tc.existingUsers[user].GID, actualGID)
|
|
||||||
|
|
||||||
// Check if the user has the right keys
|
|
||||||
checkSSHKeys(user, fs, assert, tc.configMap.Data[user]+"\n")
|
|
||||||
|
|
||||||
} else {
|
|
||||||
// Check if home directory is not available anymore under the regular path
|
|
||||||
checkHomeDirectory(user, fs, assert, false)
|
|
||||||
|
|
||||||
// Check if home directory has been evicted
|
|
||||||
homeDirs, err := afero.ReadDir(fs, evictedHomePath)
|
|
||||||
require.NoError(err)
|
|
||||||
|
|
||||||
var userDirectoryName string
|
|
||||||
for _, singleDir := range homeDirs {
|
|
||||||
if strings.Contains(singleDir.Name(), user+"_") {
|
|
||||||
userDirectoryName = singleDir.Name()
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
assert.NotEmpty(userDirectoryName)
|
|
||||||
|
|
||||||
// Check if user does not exist in /etc/passwd
|
|
||||||
_, ok := passwdEntries[user]
|
|
||||||
assert.False(ok)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check creation of new users
|
|
||||||
for user := range tc.configMap.Data {
|
|
||||||
// We already checked recreated or evicted users, so skip them.
|
|
||||||
if _, ok := tc.existingUsers[user]; ok {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
checkHomeDirectory(user, fs, assert, true)
|
|
||||||
checkSSHKeys(user, fs, assert, tc.configMap.Data[user]+"\n")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestEvictRootKey(t *testing.T) {
|
|
||||||
assert := assert.New(t)
|
|
||||||
require := require.New(t)
|
|
||||||
fs := afero.NewMemMapFs()
|
|
||||||
|
|
||||||
// Create /etc/passwd with root entry
|
|
||||||
require.NoError(fs.Mkdir("/etc", 0o644))
|
|
||||||
file, err := fs.Create("/etc/passwd")
|
|
||||||
require.NoError(err)
|
|
||||||
passwdRootEntry := "root:x:0:0:root:/root:/bin/bash\n"
|
|
||||||
n, err := file.WriteString(passwdRootEntry)
|
|
||||||
require.NoError(err)
|
|
||||||
require.Equal(len(passwdRootEntry), n)
|
|
||||||
|
|
||||||
// Deploy a fake key for root
|
|
||||||
require.NoError(fs.MkdirAll("/root/.ssh/authorized_keys.d", 0o700))
|
|
||||||
file, err = fs.Create(filepath.Join("/root", relativePathToSSHKeys))
|
|
||||||
require.NoError(err)
|
|
||||||
_, err = file.WriteString("ssh-ed25519 abcdefghijklm\n")
|
|
||||||
require.NoError(err)
|
|
||||||
|
|
||||||
linuxUserManager := user.NewLinuxUserManagerFake(fs)
|
|
||||||
|
|
||||||
// Parse /etc/passwd and check for users
|
|
||||||
passwdEntries, err := linuxUserManager.Passwd.Parse(fs)
|
|
||||||
require.NoError(err)
|
|
||||||
|
|
||||||
// Check if user exists in /etc/passwd
|
|
||||||
userEntry, ok := passwdEntries["root"]
|
|
||||||
assert.True(ok)
|
|
||||||
|
|
||||||
// Check if user has been recreated with correct UID/GID
|
|
||||||
actualUID, err := strconv.Atoi(userEntry.UID)
|
|
||||||
assert.NoError(err)
|
|
||||||
assert.EqualValues(0, actualUID)
|
|
||||||
actualGID, err := strconv.Atoi(userEntry.GID)
|
|
||||||
assert.NoError(err)
|
|
||||||
assert.EqualValues(0, actualGID)
|
|
||||||
|
|
||||||
// Delete the key
|
|
||||||
assert.NoError(evictRootKey(fs, linuxUserManager))
|
|
||||||
|
|
||||||
// Check if the key has been deleted
|
|
||||||
_, err = fs.Stat(filepath.Join("/root", relativePathToSSHKeys))
|
|
||||||
assert.True(os.IsNotExist(err))
|
|
||||||
}
|
|
||||||
|
|
||||||
func checkSSHKeys(user string, fs afero.Fs, assert *assert.Assertions, expectedValue string) {
|
|
||||||
// Do the same check as above
|
|
||||||
_, err := fs.Stat(path.Join(normalHomePath, user))
|
|
||||||
assert.NoError(err)
|
|
||||||
|
|
||||||
// Check if the user has the right keys
|
|
||||||
authorizedKeys, err := afero.ReadFile(fs, filepath.Join(normalHomePath, user, relativePathToSSHKeys))
|
|
||||||
assert.NoError(err)
|
|
||||||
assert.EqualValues(expectedValue, string(authorizedKeys))
|
|
||||||
}
|
|
||||||
|
|
||||||
func checkHomeDirectory(user string, fs afero.Fs, assert *assert.Assertions, shouldExist bool) {
|
|
||||||
_, err := fs.Stat(path.Join(normalHomePath, user))
|
|
||||||
if shouldExist {
|
|
||||||
assert.NoError(err)
|
|
||||||
} else {
|
|
||||||
assert.Error(err)
|
|
||||||
assert.True(os.IsNotExist(err))
|
|
||||||
}
|
|
||||||
}
|
|
|
@@ -21,7 +21,7 @@ type clusterFake struct{}
 // InitCluster fakes bootstrapping a new cluster with the current node being the master, returning the arguments required to join the cluster.
 func (c *clusterFake) InitCluster(
     context.Context, string, string, []byte, []uint32, bool, []byte, bool,
-    map[string]string, []byte, bool, *logger.Logger,
+    []byte, bool, *logger.Logger,
 ) ([]byte, error) {
     return []byte{}, nil
 }
bootstrapper/initproto/init.pb.go

@@ -26,19 +26,19 @@ type InitRequest struct {
     unknownFields protoimpl.UnknownFields
 
     // repeated string autoscaling_node_groups = 1; removed
     MasterSecret           []byte   `protobuf:"bytes,2,opt,name=master_secret,json=masterSecret,proto3" json:"master_secret,omitempty"`
     KmsUri                 string   `protobuf:"bytes,3,opt,name=kms_uri,json=kmsUri,proto3" json:"kms_uri,omitempty"`
     StorageUri             string   `protobuf:"bytes,4,opt,name=storage_uri,json=storageUri,proto3" json:"storage_uri,omitempty"`
     KeyEncryptionKeyId     string   `protobuf:"bytes,5,opt,name=key_encryption_key_id,json=keyEncryptionKeyId,proto3" json:"key_encryption_key_id,omitempty"`
     UseExistingKek         bool     `protobuf:"varint,6,opt,name=use_existing_kek,json=useExistingKek,proto3" json:"use_existing_kek,omitempty"`
     CloudServiceAccountUri string   `protobuf:"bytes,7,opt,name=cloud_service_account_uri,json=cloudServiceAccountUri,proto3" json:"cloud_service_account_uri,omitempty"`
     KubernetesVersion      string   `protobuf:"bytes,8,opt,name=kubernetes_version,json=kubernetesVersion,proto3" json:"kubernetes_version,omitempty"`
-    SshUserKeys            []*SSHUserKey `protobuf:"bytes,9,rep,name=ssh_user_keys,json=sshUserKeys,proto3" json:"ssh_user_keys,omitempty"`
+    // repeated SSHUserKey ssh_user_keys = 9; removed
     Salt                   []byte   `protobuf:"bytes,10,opt,name=salt,proto3" json:"salt,omitempty"`
     HelmDeployments        []byte   `protobuf:"bytes,11,opt,name=helm_deployments,json=helmDeployments,proto3" json:"helm_deployments,omitempty"`
     EnforcedPcrs           []uint32 `protobuf:"varint,12,rep,packed,name=enforced_pcrs,json=enforcedPcrs,proto3" json:"enforced_pcrs,omitempty"`
     EnforceIdkeydigest     bool     `protobuf:"varint,13,opt,name=enforce_idkeydigest,json=enforceIdkeydigest,proto3" json:"enforce_idkeydigest,omitempty"`
     ConformanceMode        bool     `protobuf:"varint,14,opt,name=conformance_mode,json=conformanceMode,proto3" json:"conformance_mode,omitempty"`
 }
 
 func (x *InitRequest) Reset() {
@@ -122,13 +122,6 @@ func (x *InitRequest) GetKubernetesVersion() string {
     return ""
 }
 
-func (x *InitRequest) GetSshUserKeys() []*SSHUserKey {
-    if x != nil {
-        return x.SshUserKeys
-    }
-    return nil
-}
-
 func (x *InitRequest) GetSalt() []byte {
     if x != nil {
         return x.Salt
@@ -227,66 +220,11 @@ func (x *InitResponse) GetClusterId() []byte {
     return nil
 }
 
-type SSHUserKey struct {
-    state         protoimpl.MessageState
-    sizeCache     protoimpl.SizeCache
-    unknownFields protoimpl.UnknownFields
-
-    Username  string `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"`
-    PublicKey string `protobuf:"bytes,2,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"`
-}
-
-func (x *SSHUserKey) Reset() {
-    *x = SSHUserKey{}
-    if protoimpl.UnsafeEnabled {
-        mi := &file_init_proto_msgTypes[2]
-        ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-        ms.StoreMessageInfo(mi)
-    }
-}
-
-func (x *SSHUserKey) String() string {
-    return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SSHUserKey) ProtoMessage() {}
-
-func (x *SSHUserKey) ProtoReflect() protoreflect.Message {
-    mi := &file_init_proto_msgTypes[2]
-    if protoimpl.UnsafeEnabled && x != nil {
-        ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-        if ms.LoadMessageInfo() == nil {
-            ms.StoreMessageInfo(mi)
-        }
-        return ms
-    }
-    return mi.MessageOf(x)
-}
-
-// Deprecated: Use SSHUserKey.ProtoReflect.Descriptor instead.
-func (*SSHUserKey) Descriptor() ([]byte, []int) {
-    return file_init_proto_rawDescGZIP(), []int{2}
-}
-
-func (x *SSHUserKey) GetUsername() string {
-    if x != nil {
-        return x.Username
-    }
-    return ""
-}
-
-func (x *SSHUserKey) GetPublicKey() string {
-    if x != nil {
-        return x.PublicKey
-    }
-    return ""
-}
-
 var File_init_proto protoreflect.FileDescriptor
 
 var file_init_proto_rawDesc = []byte{
|
||||||
0x0a, 0x0a, 0x69, 0x6e, 0x69, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x04, 0x69, 0x6e,
|
0x0a, 0x0a, 0x69, 0x6e, 0x69, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x04, 0x69, 0x6e,
|
||||||
0x69, 0x74, 0x22, 0xa9, 0x04, 0x0a, 0x0b, 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65,
|
0x69, 0x74, 0x22, 0xf3, 0x03, 0x0a, 0x0b, 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65,
|
||||||
0x73, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x63,
|
0x73, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x63,
|
||||||
0x72, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x6d, 0x61, 0x73, 0x74, 0x65,
|
0x72, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x6d, 0x61, 0x73, 0x74, 0x65,
|
||||||
0x72, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x6b, 0x6d, 0x73, 0x5f, 0x75,
|
0x72, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x6b, 0x6d, 0x73, 0x5f, 0x75,
|
||||||
|
@ -305,42 +243,34 @@ var file_init_proto_rawDesc = []byte{
|
||||||
0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x55, 0x72, 0x69, 0x12, 0x2d, 0x0a, 0x12, 0x6b, 0x75, 0x62,
|
0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x55, 0x72, 0x69, 0x12, 0x2d, 0x0a, 0x12, 0x6b, 0x75, 0x62,
|
||||||
0x65, 0x72, 0x6e, 0x65, 0x74, 0x65, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18,
|
0x65, 0x72, 0x6e, 0x65, 0x74, 0x65, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18,
|
||||||
0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x6b, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x65,
|
0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x6b, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x65,
|
||||||
0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x34, 0x0a, 0x0d, 0x73, 0x73, 0x68, 0x5f,
|
0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x61, 0x6c, 0x74,
|
||||||
0x75, 0x73, 0x65, 0x72, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32,
|
0x18, 0x0a, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x73, 0x61, 0x6c, 0x74, 0x12, 0x29, 0x0a, 0x10,
|
||||||
0x10, 0x2e, 0x69, 0x6e, 0x69, 0x74, 0x2e, 0x53, 0x53, 0x48, 0x55, 0x73, 0x65, 0x72, 0x4b, 0x65,
|
0x68, 0x65, 0x6c, 0x6d, 0x5f, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x73,
|
||||||
0x79, 0x52, 0x0b, 0x73, 0x73, 0x68, 0x55, 0x73, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x12,
|
0x18, 0x0b, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0f, 0x68, 0x65, 0x6c, 0x6d, 0x44, 0x65, 0x70, 0x6c,
|
||||||
0x0a, 0x04, 0x73, 0x61, 0x6c, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x73, 0x61,
|
0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x6e, 0x66, 0x6f, 0x72,
|
||||||
0x6c, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x68, 0x65, 0x6c, 0x6d, 0x5f, 0x64, 0x65, 0x70, 0x6c, 0x6f,
|
0x63, 0x65, 0x64, 0x5f, 0x70, 0x63, 0x72, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x0c,
|
||||||
0x79, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0f, 0x68, 0x65,
|
0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x50, 0x63, 0x72, 0x73, 0x12, 0x2f, 0x0a, 0x13,
|
||||||
0x6c, 0x6d, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x23, 0x0a,
|
0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x6b, 0x65, 0x79, 0x64, 0x69, 0x67,
|
||||||
0x0d, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, 0x70, 0x63, 0x72, 0x73, 0x18, 0x0c,
|
0x65, 0x73, 0x74, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x65, 0x6e, 0x66, 0x6f, 0x72,
|
||||||
0x20, 0x03, 0x28, 0x0d, 0x52, 0x0c, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x50, 0x63,
|
0x63, 0x65, 0x49, 0x64, 0x6b, 0x65, 0x79, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x29, 0x0a,
|
||||||
0x72, 0x73, 0x12, 0x2f, 0x0a, 0x13, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x64,
|
0x10, 0x63, 0x6f, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6d, 0x6f, 0x64,
|
||||||
0x6b, 0x65, 0x79, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52,
|
0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x66, 0x6f, 0x72, 0x6d,
|
||||||
0x12, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x49, 0x64, 0x6b, 0x65, 0x79, 0x64, 0x69, 0x67,
|
0x61, 0x6e, 0x63, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x22, 0x68, 0x0a, 0x0c, 0x49, 0x6e, 0x69, 0x74,
|
||||||
0x65, 0x73, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e,
|
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x6b, 0x75, 0x62, 0x65,
|
||||||
0x63, 0x65, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x63,
|
0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x6b, 0x75,
|
||||||
0x6f, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x22, 0x68,
|
0x62, 0x65, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x77, 0x6e, 0x65,
|
||||||
0x0a, 0x0c, 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1e,
|
0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6f, 0x77, 0x6e, 0x65,
|
||||||
0x0a, 0x0a, 0x6b, 0x75, 0x62, 0x65, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01,
|
0x72, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69,
|
||||||
0x28, 0x0c, 0x52, 0x0a, 0x6b, 0x75, 0x62, 0x65, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x19,
|
0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
|
||||||
0x0a, 0x08, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c,
|
0x49, 0x64, 0x32, 0x34, 0x0a, 0x03, 0x41, 0x50, 0x49, 0x12, 0x2d, 0x0a, 0x04, 0x49, 0x6e, 0x69,
|
||||||
0x52, 0x07, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75,
|
0x74, 0x12, 0x11, 0x2e, 0x69, 0x6e, 0x69, 0x74, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x71,
|
||||||
0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x63,
|
0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x69, 0x6e, 0x69, 0x74, 0x2e, 0x49, 0x6e, 0x69, 0x74,
|
||||||
0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x22, 0x47, 0x0a, 0x0a, 0x53, 0x53, 0x48, 0x55,
|
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x40, 0x5a, 0x3e, 0x67, 0x69, 0x74, 0x68,
|
||||||
0x73, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61,
|
0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x64, 0x67, 0x65, 0x6c, 0x65, 0x73, 0x73, 0x73,
|
||||||
0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61,
|
0x79, 0x73, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x65, 0x6c, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e,
|
||||||
0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79,
|
0x2f, 0x76, 0x32, 0x2f, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72,
|
||||||
0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65,
|
0x2f, 0x69, 0x6e, 0x69, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
|
||||||
0x79, 0x32, 0x34, 0x0a, 0x03, 0x41, 0x50, 0x49, 0x12, 0x2d, 0x0a, 0x04, 0x49, 0x6e, 0x69, 0x74,
|
0x6f, 0x33,
|
||||||
0x12, 0x11, 0x2e, 0x69, 0x6e, 0x69, 0x74, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75,
|
|
||||||
0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x69, 0x6e, 0x69, 0x74, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x52,
|
|
||||||
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x40, 0x5a, 0x3e, 0x67, 0x69, 0x74, 0x68, 0x75,
|
|
||||||
0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x64, 0x67, 0x65, 0x6c, 0x65, 0x73, 0x73, 0x73, 0x79,
|
|
||||||
0x73, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x65, 0x6c, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f,
|
|
||||||
0x76, 0x32, 0x2f, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x2f,
|
|
||||||
0x69, 0x6e, 0x69, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
|
||||||
0x33,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
@@ -355,21 +285,19 @@ func file_init_proto_rawDescGZIP() []byte {
     return file_init_proto_rawDescData
 }
 
-var file_init_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
+var file_init_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
 var file_init_proto_goTypes = []interface{}{
     (*InitRequest)(nil),  // 0: init.InitRequest
     (*InitResponse)(nil), // 1: init.InitResponse
-    (*SSHUserKey)(nil),   // 2: init.SSHUserKey
 }
 var file_init_proto_depIdxs = []int32{
-    2, // 0: init.InitRequest.ssh_user_keys:type_name -> init.SSHUserKey
-    0, // 1: init.API.Init:input_type -> init.InitRequest
-    1, // 2: init.API.Init:output_type -> init.InitResponse
-    2, // [2:3] is the sub-list for method output_type
-    1, // [1:2] is the sub-list for method input_type
-    1, // [1:1] is the sub-list for extension type_name
-    1, // [1:1] is the sub-list for extension extendee
-    0, // [0:1] is the sub-list for field type_name
+    0, // 0: init.API.Init:input_type -> init.InitRequest
+    1, // 1: init.API.Init:output_type -> init.InitResponse
+    1, // [1:2] is the sub-list for method output_type
+    0, // [0:1] is the sub-list for method input_type
+    0, // [0:0] is the sub-list for extension type_name
+    0, // [0:0] is the sub-list for extension extendee
+    0, // [0:0] is the sub-list for field type_name
 }
 
 func init() { file_init_proto_init() }
@@ -402,18 +330,6 @@ func file_init_proto_init() {
             return nil
         }
     }
-    file_init_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
-        switch v := v.(*SSHUserKey); i {
-        case 0:
-            return &v.state
-        case 1:
-            return &v.sizeCache
-        case 2:
-            return &v.unknownFields
-        default:
-            return nil
-        }
-    }
     type x struct{}
     out := protoimpl.TypeBuilder{
@@ -421,7 +337,7 @@ func file_init_proto_init() {
         GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
         RawDescriptor: file_init_proto_rawDesc,
         NumEnums:      0,
-        NumMessages:   3,
+        NumMessages:   2,
         NumExtensions: 0,
         NumServices:   1,
     },
bootstrapper/initproto/init.proto

@@ -17,7 +17,7 @@ message InitRequest {
   bool use_existing_kek = 6;
   string cloud_service_account_uri = 7;
   string kubernetes_version = 8;
-  repeated SSHUserKey ssh_user_keys = 9;
+  // repeated SSHUserKey ssh_user_keys = 9; removed
   bytes salt = 10;
   bytes helm_deployments = 11;
   repeated uint32 enforced_pcrs = 12;
@@ -30,8 +30,3 @@ message InitResponse {
   bytes owner_id = 2;
   bytes cluster_id = 3;
 }
-
-message SSHUserKey {
-  string username = 1;
-  string public_key = 2;
-}
|
||||||
req.EnforceIdkeydigest,
|
req.EnforceIdkeydigest,
|
||||||
s.issuerWrapper.IDKeyDigest(),
|
s.issuerWrapper.IDKeyDigest(),
|
||||||
s.issuerWrapper.VMType() == vmtype.AzureCVM,
|
s.issuerWrapper.VMType() == vmtype.AzureCVM,
|
||||||
sshProtoKeysToMap(req.SshUserKeys),
|
|
||||||
req.HelmDeployments,
|
req.HelmDeployments,
|
||||||
req.ConformanceMode,
|
req.ConformanceMode,
|
||||||
s.log,
|
s.log,
|
||||||
|
@ -199,14 +198,6 @@ func (i *IssuerWrapper) IDKeyDigest() []byte {
|
||||||
return i.idkeydigest
|
return i.idkeydigest
|
||||||
}
|
}
|
||||||
|
|
||||||
func sshProtoKeysToMap(keys []*initproto.SSHUserKey) map[string]string {
|
|
||||||
keyMap := make(map[string]string)
|
|
||||||
for _, key := range keys {
|
|
||||||
keyMap[key.Username] = key.PublicKey
|
|
||||||
}
|
|
||||||
return keyMap
|
|
||||||
}
|
|
||||||
|
|
||||||
func deriveMeasurementValues(masterSecret, hkdfSalt []byte) (salt, clusterID []byte, err error) {
|
func deriveMeasurementValues(masterSecret, hkdfSalt []byte) (salt, clusterID []byte, err error) {
|
||||||
salt, err = crypto.GenerateRandomBytes(crypto.RNGLengthDefault)
|
salt, err = crypto.GenerateRandomBytes(crypto.RNGLengthDefault)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -236,7 +227,6 @@ type ClusterInitializer interface {
|
||||||
enforceIDKeyDigest bool,
|
enforceIDKeyDigest bool,
|
||||||
idKeyDigest []byte,
|
idKeyDigest []byte,
|
||||||
azureCVM bool,
|
azureCVM bool,
|
||||||
sshUserKeys map[string]string,
|
|
||||||
helmDeployments []byte,
|
helmDeployments []byte,
|
||||||
conformanceMode bool,
|
conformanceMode bool,
|
||||||
log *logger.Logger,
|
log *logger.Logger,
|
||||||
|
|
|
@@ -155,45 +155,6 @@ func TestInit(t *testing.T) {
     }
 }
 
-func TestSSHProtoKeysToMap(t *testing.T) {
-    testCases := map[string]struct {
-        keys []*initproto.SSHUserKey
-        want map[string]string
-    }{
-        "empty": {
-            keys: []*initproto.SSHUserKey{},
-            want: map[string]string{},
-        },
-        "one key": {
-            keys: []*initproto.SSHUserKey{
-                {Username: "key1", PublicKey: "key1-key"},
-            },
-            want: map[string]string{
-                "key1": "key1-key",
-            },
-        },
-        "two keys": {
-            keys: []*initproto.SSHUserKey{
-                {Username: "key1", PublicKey: "key1-key"},
-                {Username: "key2", PublicKey: "key2-key"},
-            },
-            want: map[string]string{
-                "key1": "key1-key",
-                "key2": "key2-key",
-            },
-        },
-    }
-
-    for name, tc := range testCases {
-        t.Run(name, func(t *testing.T) {
-            assert := assert.New(t)
-
-            got := sshProtoKeysToMap(tc.keys)
-            assert.Equal(tc.want, got)
-        })
-    }
-}
-
 func TestSetupDisk(t *testing.T) {
     testCases := map[string]struct {
         uuid string
@@ -289,7 +250,7 @@ type stubClusterInitializer struct {
 
 func (i *stubClusterInitializer) InitCluster(
     context.Context, string, string, []byte, []uint32, bool, []byte, bool,
-    map[string]string, []byte, bool, *logger.Logger,
+    []byte, bool, *logger.Logger,
 ) ([]byte, error) {
     return i.initClusterKubeconfig, i.initClusterErr
 }
@ -330,11 +330,6 @@ func (k *KubernetesUtil) SetupGCPGuestAgent(kubectl Client, guestAgentDaemonset
|
||||||
return kubectl.Apply(guestAgentDaemonset, true)
|
return kubectl.Apply(guestAgentDaemonset, true)
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetupAccessManager deploys the constellation-access-manager for deploying SSH keys on control-plane & worker nodes.
|
|
||||||
func (k *KubernetesUtil) SetupAccessManager(kubectl Client, accessManagerConfiguration kubernetes.Marshaler) error {
|
|
||||||
return kubectl.Apply(accessManagerConfiguration, true)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetupVerificationService deploys the verification service.
|
// SetupVerificationService deploys the verification service.
|
||||||
func (k *KubernetesUtil) SetupVerificationService(kubectl Client, verificationServiceConfiguration kubernetes.Marshaler) error {
|
func (k *KubernetesUtil) SetupVerificationService(kubectl Client, verificationServiceConfiguration kubernetes.Marshaler) error {
|
||||||
return kubectl.Apply(verificationServiceConfiguration, true)
|
return kubectl.Apply(verificationServiceConfiguration, true)
|
||||||
|
|
|
@@ -1,203 +0,0 @@
-/*
-Copyright (c) Edgeless Systems GmbH
-
-SPDX-License-Identifier: AGPL-3.0-only
-*/
-
-package resources
-
-import (
-"github.com/edgelesssys/constellation/v2/internal/kubernetes"
-"github.com/edgelesssys/constellation/v2/internal/versions"
-"google.golang.org/protobuf/proto"
-apps "k8s.io/api/apps/v1"
-k8s "k8s.io/api/core/v1"
-rbac "k8s.io/api/rbac/v1"
-v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-const accessManagerNamespace = "kube-system"
-
-// AccessManagerDeployment holds the configuration for the SSH user creation pods. User/Key definitions are stored in the ConfigMap, and the manager is deployed on each node by the DaemonSet.
-type AccessManagerDeployment struct {
-ConfigMap k8s.ConfigMap
-ServiceAccount k8s.ServiceAccount
-Role rbac.Role
-RoleBinding rbac.RoleBinding
-DaemonSet apps.DaemonSet
-}
-
-// NewAccessManagerDeployment creates a new *accessManagerDeployment which manages the SSH users for the cluster.
-func NewAccessManagerDeployment(sshUsers map[string]string) *AccessManagerDeployment {
-return &AccessManagerDeployment{
-ServiceAccount: k8s.ServiceAccount{
-TypeMeta: v1.TypeMeta{
-APIVersion: "v1",
-Kind: "ServiceAccount",
-},
-ObjectMeta: v1.ObjectMeta{
-Labels: map[string]string{
-"app.kubernetes.io/instance": "constellation",
-"app.kubernetes.io/name": "constellation-access-manager",
-"app.kubernetes.io/managed-by": "Constellation",
-},
-Name: "constellation-access-manager",
-Namespace: accessManagerNamespace,
-},
-AutomountServiceAccountToken: proto.Bool(true),
-},
-ConfigMap: k8s.ConfigMap{
-TypeMeta: v1.TypeMeta{
-APIVersion: "v1",
-Kind: "ConfigMap",
-},
-ObjectMeta: v1.ObjectMeta{
-Name: "ssh-users",
-Namespace: accessManagerNamespace,
-},
-Data: sshUsers,
-},
-DaemonSet: apps.DaemonSet{
-TypeMeta: v1.TypeMeta{
-APIVersion: "apps/v1",
-Kind: "DaemonSet",
-},
-ObjectMeta: v1.ObjectMeta{
-Name: "constellation-access-manager",
-Namespace: accessManagerNamespace,
-Labels: map[string]string{
-"app.kubernetes.io/instance": "constellation",
-"app.kubernetes.io/name": "constellation-access-manager",
-},
-},
-Spec: apps.DaemonSetSpec{
-Selector: &v1.LabelSelector{
-MatchLabels: map[string]string{
-"app.kubernetes.io/instance": "constellation",
-"app.kubernetes.io/name": "constellation-access-manager",
-},
-},
-Template: k8s.PodTemplateSpec{
-ObjectMeta: v1.ObjectMeta{
-Labels: map[string]string{
-"app.kubernetes.io/instance": "constellation",
-"app.kubernetes.io/name": "constellation-access-manager",
-},
-},
-Spec: k8s.PodSpec{
-Tolerations: []k8s.Toleration{
-{
-Key: "node-role.kubernetes.io/master",
-Operator: k8s.TolerationOpExists,
-Effect: k8s.TaintEffectNoSchedule,
-},
-{
-Key: "node-role.kubernetes.io/control-plane",
-Operator: k8s.TolerationOpExists,
-Effect: k8s.TaintEffectNoSchedule,
-},
-},
-Containers: []k8s.Container{
-{
-Name: "pause",
-Image: "gcr.io/google_containers/pause",
-ImagePullPolicy: k8s.PullIfNotPresent,
-},
-},
-InitContainers: []k8s.Container{
-{
-Name: "constellation-access-manager",
-Image: versions.AccessManagerImage,
-VolumeMounts: []k8s.VolumeMount{
-{
-Name: "host",
-MountPath: "/host",
-},
-},
-SecurityContext: &k8s.SecurityContext{
-Capabilities: &k8s.Capabilities{
-Add: []k8s.Capability{
-"SYS_CHROOT",
-},
-},
-},
-},
-},
-ServiceAccountName: "constellation-access-manager",
-Volumes: []k8s.Volume{
-{
-Name: "host",
-VolumeSource: k8s.VolumeSource{
-HostPath: &k8s.HostPathVolumeSource{
-Path: "/",
-},
-},
-},
-},
-},
-},
-},
-},
-Role: rbac.Role{
-TypeMeta: v1.TypeMeta{
-APIVersion: "rbac.authorization.k8s.io/v1",
-Kind: "Role",
-},
-ObjectMeta: v1.ObjectMeta{
-Labels: map[string]string{
-"app.kubernetes.io/instance": "constellation",
-"app.kubernetes.io/name": "constellation-access-manager",
-"app.kubernetes.io/managed-by": "Constellation",
-},
-Name: "constellation-access-manager",
-Namespace: accessManagerNamespace,
-},
-Rules: []rbac.PolicyRule{
-{
-APIGroups: []string{""},
-Resources: []string{
-"configmaps",
-},
-ResourceNames: []string{
-"ssh-users",
-},
-Verbs: []string{
-"get",
-},
-},
-},
-},
-RoleBinding: rbac.RoleBinding{
-TypeMeta: v1.TypeMeta{
-APIVersion: "rbac.authorization.k8s.io/v1",
-Kind: "RoleBinding",
-},
-ObjectMeta: v1.ObjectMeta{
-Labels: map[string]string{
-"app.kubernetes.io/instance": "constellation",
-"app.kubernetes.io/name": "constellation-access-manager",
-"app.kubernetes.io/managed-by": "Constellation",
-},
-Name: "constellation-access-manager",
-Namespace: accessManagerNamespace,
-},
-RoleRef: rbac.RoleRef{
-APIGroup: "rbac.authorization.k8s.io",
-Kind: "Role",
-Name: "constellation-access-manager",
-},
-Subjects: []rbac.Subject{
-{
-Kind: "ServiceAccount",
-Name: "constellation-access-manager",
-Namespace: accessManagerNamespace,
-},
-},
-},
-}
-}
-
-// Marshal marshals the access-manager deployment as YAML documents.
-func (c *AccessManagerDeployment) Marshal() ([]byte, error) {
-return kubernetes.MarshalK8SResources(c)
-}
@@ -1,44 +0,0 @@
-/*
-Copyright (c) Edgeless Systems GmbH
-
-SPDX-License-Identifier: AGPL-3.0-only
-*/
-
-package resources
-
-import (
-"testing"
-
-"github.com/edgelesssys/constellation/v2/internal/kubernetes"
-"github.com/stretchr/testify/assert"
-"github.com/stretchr/testify/require"
-"go.uber.org/goleak"
-)
-
-func TestMain(m *testing.M) {
-goleak.VerifyTestMain(m)
-}
-
-func TestAccessManagerMarshalUnmarshal(t *testing.T) {
-require := require.New(t)
-assert := assert.New(t)
-
-// Without data
-accessManagerDeplNil := NewAccessManagerDeployment(nil)
-data, err := accessManagerDeplNil.Marshal()
-require.NoError(err)
-
-var recreated AccessManagerDeployment
-require.NoError(kubernetes.UnmarshalK8SResources(data, &recreated))
-assert.Equal(accessManagerDeplNil, &recreated)
-
-// With data
-sshUsers := make(map[string]string)
-sshUsers["test-user"] = "ssh-rsa abcdefg"
-accessManagerDeplNil = NewAccessManagerDeployment(sshUsers)
-data, err = accessManagerDeplNil.Marshal()
-require.NoError(err)
-
-require.NoError(kubernetes.UnmarshalK8SResources(data, &recreated))
-assert.Equal(accessManagerDeplNil, &recreated)
-}
@@ -178,7 +178,7 @@ func NewGCPGuestAgentDaemonset() *GCPGuestAgentDaemonset {
 }
 }

-// Marshal marshals the access-manager deployment as YAML documents.
+// Marshal marshals the gcp guest agent deployment as YAML documents.
 func (c *GCPGuestAgentDaemonset) Marshal() ([]byte, error) {
 return kubernetes.MarshalK8SResources(c)
 }

@@ -22,7 +22,6 @@ type clusterUtil interface {
 InstallComponents(ctx context.Context, version versions.ValidK8sVersion) error
 InitCluster(ctx context.Context, initConfig []byte, nodeName string, ips []net.IP, controlPlaneEndpoint string, conformanceMode bool, log *logger.Logger) error
 JoinCluster(ctx context.Context, joinConfig []byte, peerRole role.Role, controlPlaneEndpoint string, log *logger.Logger) error
-SetupAccessManager(kubectl k8sapi.Client, sshUsers kubernetes.Marshaler) error
 SetupKonnectivity(kubectl k8sapi.Client, konnectivityAgentsDaemonSet kubernetes.Marshaler) error
 SetupVerificationService(kubectl k8sapi.Client, verificationServiceConfiguration kubernetes.Marshaler) error
 SetupGCPGuestAgent(kubectl k8sapi.Client, gcpGuestAgentConfiguration kubernetes.Marshaler) error

@@ -86,7 +86,7 @@ func New(cloudProvider string, clusterUtil clusterUtil, configProvider configura
 // InitCluster initializes a new Kubernetes cluster and applies pod network provider.
 func (k *KubeWrapper) InitCluster(
 ctx context.Context, cloudServiceAccountURI, versionString string, measurementSalt []byte, enforcedPCRs []uint32,
-enforceIDKeyDigest bool, idKeyDigest []byte, azureCVM bool, sshUsers map[string]string,
+enforceIDKeyDigest bool, idKeyDigest []byte, azureCVM bool,
 helmReleasesRaw []byte, conformanceMode bool, log *logger.Logger,
 ) ([]byte, error) {
 k8sVersion, err := versions.NewValidK8sVersion(versionString)

@@ -205,12 +205,6 @@ func (k *KubeWrapper) InitCluster(
 return nil, fmt.Errorf("failed to setup internal ConfigMap: %w", err)
 }

-// TODO: remove access manager or re-enable with support for readonly /etc
-// accessManager := resources.NewAccessManagerDeployment(sshUsers)
-// if err := k.clusterUtil.SetupAccessManager(k.client, accessManager); err != nil {
-// return nil, fmt.Errorf("failed to setup access-manager: %w", err)
-// }
-
 if err := k.clusterUtil.SetupVerificationService(
 k.client, resources.NewVerificationDaemonSet(k.cloudProvider, controlPlaneEndpoint),
 ); err != nil {

@@ -236,7 +236,7 @@ func TestInitCluster(t *testing.T) {

 _, err := kube.InitCluster(
 context.Background(), serviceAccountURI, string(tc.k8sVersion),
-nil, nil, false, nil, true, nil, []byte("{}"), false, logger.NewTest(t),
+nil, nil, false, nil, true, []byte("{}"), false, logger.NewTest(t),
 )

 if tc.wantErr {

@@ -419,7 +419,6 @@ type stubClusterUtil struct {
 initClusterErr error
 setupAutoscalingError error
 setupKonnectivityError error
-setupAccessManagerError error
 setupVerificationServiceErr error
 setupGCPGuestAgentErr error
 setupOLMErr error

@@ -453,10 +452,6 @@ func (s *stubClusterUtil) SetupGCPGuestAgent(kubectl k8sapi.Client, gcpGuestAgen
 return s.setupGCPGuestAgentErr
 }

-func (s *stubClusterUtil) SetupAccessManager(kubectl k8sapi.Client, accessManagerConfiguration kubernetes.Marshaler) error {
-return s.setupAccessManagerError
-}
-
 func (s *stubClusterUtil) SetupVerificationService(kubectl k8sapi.Client, verificationServiceConfiguration kubernetes.Marshaler) error {
 return s.setupVerificationServiceErr
 }

@@ -26,7 +26,6 @@ import (
 "github.com/edgelesssys/constellation/v2/internal/config"
 "github.com/edgelesssys/constellation/v2/internal/constants"
 "github.com/edgelesssys/constellation/v2/internal/crypto"
-"github.com/edgelesssys/constellation/v2/internal/deploy/ssh"
 "github.com/edgelesssys/constellation/v2/internal/file"
 "github.com/edgelesssys/constellation/v2/internal/grpc/dialer"
 grpcRetry "github.com/edgelesssys/constellation/v2/internal/grpc/retry"

@@ -103,14 +102,6 @@ func initialize(cmd *cobra.Command, newDialer func(validator *cloudcmd.Validator
 cmd.PrintErrf("License check failed: %v", err)
 }

-var sshUsers []*ssh.UserKey
-for _, user := range config.SSHUsers {
-sshUsers = append(sshUsers, &ssh.UserKey{
-Username: user.Username,
-PublicKey: user.PublicKey,
-})
-}
-
 validator, err := cloudcmd.NewValidator(provider, config)
 if err != nil {
 return err

@@ -141,7 +132,6 @@ func initialize(cmd *cobra.Command, newDialer func(validator *cloudcmd.Validator
 UseExistingKek: false,
 CloudServiceAccountUri: serviceAccURI,
 KubernetesVersion: config.KubernetesVersion,
-SshUserKeys: ssh.ToProtoSlice(sshUsers),
 HelmDeployments: helmDeployments,
 EnforcedPcrs: getEnforcedPCRs(provider, config),
 EnforceIdkeydigest: getEnforceIDKeyDigest(provider, config),
@@ -37,7 +37,7 @@ $ helm install cilium cilium/cilium --namespace=kube-system
 ```

 After Cilium is installed, you can explore the features that Cilium has to
-offer from the [Getting Started Guides page](https://docs.cilium.io/en/latest/gettingstarted/k8s-install-default/#next-steps).
+offer from the [Getting Started Guides page](https://docs.cilium.io/en/stable/gettingstarted/).

 ## Source Code

@@ -39,7 +39,7 @@ $ helm install cilium cilium/cilium --namespace=kube-system
 ```

 After Cilium is installed, you can explore the features that Cilium has to
-offer from the [Getting Started Guides page](https://docs.cilium.io/en/latest/gettingstarted/).
+offer from the [Getting Started Guides page](https://docs.cilium.io/en/stable/gettingstarted/).

 {{ template "chart.maintainersSection" . }}
@@ -25,8 +25,6 @@ import (
 platform "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
 gcpcloud "github.com/edgelesssys/constellation/v2/internal/cloud/gcp"
 qemucloud "github.com/edgelesssys/constellation/v2/internal/cloud/qemu"
-"github.com/edgelesssys/constellation/v2/internal/deploy/ssh"
-"github.com/edgelesssys/constellation/v2/internal/deploy/user"
 "github.com/edgelesssys/constellation/v2/internal/logger"
 "github.com/spf13/afero"
 "go.uber.org/zap"

@@ -47,7 +45,6 @@ func main() {
 fs := afero.NewOsFs()
 streamer := bootstrapper.NewFileStreamer(fs)
 serviceManager := deploy.NewServiceManager(log.Named("serviceManager"))
-ssh := ssh.NewAccess(log, user.NewLinuxUserManager(fs))

 ctx, cancel := context.WithCancel(context.Background())
 defer cancel()

@@ -89,9 +86,8 @@ func main() {
 log.Errorf("Unknown / unimplemented cloud provider CONSTEL_CSP=%v. Using fallback", csp)
 fetcher = fallback.Fetcher{}
 }
-
-sched := metadata.NewScheduler(log.Named("scheduler"), fetcher, ssh, download)
-serv := server.New(log.Named("server"), ssh, serviceManager, streamer)
+sched := metadata.NewScheduler(log.Named("scheduler"), fetcher, download)
+serv := server.New(log.Named("server"), serviceManager, streamer)
 if err := deploy.DefaultServiceUnit(ctx, serviceManager); err != nil {
 log.With(zap.Error(err)).Fatalf("Failed to create default service unit")
 }
@@ -28,8 +28,8 @@ import (
 func newDeployCmd() *cobra.Command {
 deployCmd := &cobra.Command{
 Use: "deploy",
-Short: "Deploys a self-compiled bootstrapper binary and SSH keys on the current constellation",
-Long: `Deploys a self-compiled bootstrapper binary and SSH keys on the current constellation.
+Short: "Deploys a self-compiled bootstrapper binary on the current constellation",
+Long: `Deploys a self-compiled bootstrapper binary on the current constellation.
 Uses config provided by --config and reads constellation config from its default location.
 If required, you can override the IP addresses that are used for a deployment by specifying "--ips" and a list of IP addresses.
 Specifying --bootstrapper will upload the bootstrapper from the specified path.`,

@@ -88,7 +88,6 @@ func deploy(cmd *cobra.Command, fileHandler file.Handler, constellationConfig *c
 debugdEndpoint: net.JoinHostPort(ip, strconv.Itoa(constants.DebugdPort)),
 bootstrapperPath: bootstrapperPath,
 reader: reader,
-authorizedKeys: constellationConfig.SSHUsers,
 }
 if err := deployOnEndpoint(cmd.Context(), input); err != nil {
 return err

@@ -102,10 +101,9 @@ type deployOnEndpointInput struct {
 debugdEndpoint string
 bootstrapperPath string
 reader fileToStreamReader
-authorizedKeys []config.UserKey
 }

-// deployOnEndpoint deploys SSH public keys and a locally built bootstrapper binary to a debugd endpoint.
+// deployOnEndpoint deploys a custom built bootstrapper binary to a debugd endpoint.
 func deployOnEndpoint(ctx context.Context, in deployOnEndpointInput) error {
 log.Printf("Deploying on %v\n", in.debugdEndpoint)
 dialCTX, cancel := context.WithTimeout(ctx, debugd.GRPCTimeout)

@@ -117,23 +115,6 @@ func deployOnEndpoint(ctx context.Context, in deployOnEndpointInput) error {
 defer conn.Close()
 client := pb.NewDebugdClient(conn)

-if len(in.authorizedKeys) > 0 {
-log.Println("Warning: Uploading authorized keys is currently disabled.")
-}
-// TODO (stateless-ssh): re-enable once ssh keys can be deployed on readonly rootfs.
-// log.Println("Uploading authorized keys")
-// pbKeys := []*pb.AuthorizedKey{}
-// for _, key := range in.authorizedKeys {
-// pbKeys = append(pbKeys, &pb.AuthorizedKey{
-// Username: key.Username,
-// KeyValue: key.PublicKey,
-// })
-// }
-// authorizedKeysResponse, err := client.UploadAuthorizedKeys(ctx, &pb.UploadAuthorizedKeysRequest{Keys: pbKeys}, grpc.WaitForReady(true))
-// if err != nil || authorizedKeysResponse.Status != pb.UploadAuthorizedKeysStatus_UPLOAD_AUTHORIZED_KEYS_SUCCESS {
-// return fmt.Errorf("uploading authorized keys to instance %v failed: %v / %w", in.debugdEndpoint, authorizedKeysResponse, err)
-// }
-
 stream, err := client.UploadBootstrapper(ctx, grpc.WaitForReady(true))
 if err != nil {
 return fmt.Errorf("starting bootstrapper upload to instance %v: %w", in.debugdEndpoint, err)
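The remaining upload path in `cdbg deploy` streams the bootstrapper binary to the debugd endpoint in chunks. A minimal, self-contained sketch of that chunked-upload pattern follows; the `chunkSender` interface and the 1 MiB chunk size are illustrative assumptions, not the repository's actual client code.

```go
package sketch

import (
	"fmt"
	"io"
	"os"
)

// chunkSender is a stand-in for a gRPC client stream's Send method (illustrative only).
type chunkSender interface {
	Send(chunk []byte) error
}

// uploadFileInChunks reads a file and forwards it chunk by chunk,
// mirroring the stream-based bootstrapper upload kept in the diff above.
func uploadFileInChunks(path string, stream chunkSender) error {
	f, err := os.Open(path)
	if err != nil {
		return fmt.Errorf("opening %s: %w", path, err)
	}
	defer f.Close()

	buf := make([]byte, 1<<20) // assumed 1 MiB chunk size
	for {
		n, err := f.Read(buf)
		if n > 0 {
			if sendErr := stream.Send(buf[:n]); sendErr != nil {
				return fmt.Errorf("sending chunk: %w", sendErr)
			}
		}
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return fmt.Errorf("reading %s: %w", path, err)
		}
	}
}
```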
@@ -12,7 +12,6 @@ import "time"
 const (
 DebugdMetadataFlag = "constellation-debugd"
 GRPCTimeout = 5 * time.Minute
-SSHCheckInterval = 30 * time.Second
 DiscoverDebugdInterval = 30 * time.Second
 BootstrapperDownloadRetryBackoff = 1 * time.Minute
 BootstrapperDeployFilename = "/run/state/bin/bootstrapper"
@@ -17,7 +17,6 @@ import (
 "github.com/edgelesssys/constellation/v2/debugd/internal/debugd"
 pb "github.com/edgelesssys/constellation/v2/debugd/service"
 "github.com/edgelesssys/constellation/v2/internal/constants"
-"github.com/edgelesssys/constellation/v2/internal/deploy/ssh"
 "github.com/edgelesssys/constellation/v2/internal/logger"
 "go.uber.org/zap"
 "google.golang.org/grpc"

@@ -45,20 +44,20 @@ func New(log *logger.Logger, dialer NetDialer, serviceManager serviceManager, wr
 }

 // DownloadDeployment will open a new grpc connection to another instance, attempting to download a bootstrapper from that instance.
-func (d *Download) DownloadDeployment(ctx context.Context, ip string) ([]ssh.UserKey, error) {
+func (d *Download) DownloadDeployment(ctx context.Context, ip string) error {
 log := d.log.With(zap.String("ip", ip))
 serverAddr := net.JoinHostPort(ip, strconv.Itoa(constants.DebugdPort))

 // only retry download from same endpoint after backoff
 if lastAttempt, ok := d.attemptedDownloads[serverAddr]; ok && time.Since(lastAttempt) < debugd.BootstrapperDownloadRetryBackoff {
-return nil, fmt.Errorf("download failed too recently: %v / %v", time.Since(lastAttempt), debugd.BootstrapperDownloadRetryBackoff)
+return fmt.Errorf("download failed too recently: %v / %v", time.Since(lastAttempt), debugd.BootstrapperDownloadRetryBackoff)
 }

 log.Infof("Connecting to server")
 d.attemptedDownloads[serverAddr] = time.Now()
 conn, err := d.dial(ctx, serverAddr)
 if err != nil {
-return nil, fmt.Errorf("connecting to other instance via gRPC: %w", err)
+return fmt.Errorf("connecting to other instance via gRPC: %w", err)
 }
 defer conn.Close()
 client := pb.NewDebugdClient(conn)

@@ -66,34 +65,23 @@ func (d *Download) DownloadDeployment(ctx context.Context, ip string) ([]ssh.Use
 log.Infof("Trying to download bootstrapper")
 stream, err := client.DownloadBootstrapper(ctx, &pb.DownloadBootstrapperRequest{})
 if err != nil {
-return nil, fmt.Errorf("starting bootstrapper download from other instance: %w", err)
+return fmt.Errorf("starting bootstrapper download from other instance: %w", err)
 }
 if err := d.writer.WriteStream(debugd.BootstrapperDeployFilename, stream, true); err != nil {
-return nil, fmt.Errorf("streaming bootstrapper from other instance: %w", err)
+return fmt.Errorf("streaming bootstrapper from other instance: %w", err)
 }
 log.Infof("Successfully downloaded bootstrapper")

-log.Infof("Trying to download ssh keys")
-resp, err := client.DownloadAuthorizedKeys(ctx, &pb.DownloadAuthorizedKeysRequest{})
-if err != nil {
-return nil, fmt.Errorf("downloading authorized keys: %w", err)
-}
-
-var keys []ssh.UserKey
-for _, key := range resp.Keys {
-keys = append(keys, ssh.UserKey{Username: key.Username, PublicKey: key.KeyValue})
-}
-
 // after the upload succeeds, try to restart the bootstrapper
 restartAction := ServiceManagerRequest{
 Unit: debugd.BootstrapperSystemdUnitName,
 Action: Restart,
 }
 if err := d.serviceManager.SystemdAction(ctx, restartAction); err != nil {
-return nil, fmt.Errorf("restarting bootstrapper: %w", err)
+return fmt.Errorf("restarting bootstrapper: %w", err)
 }

-return keys, nil
+return nil
 }

 func (d *Download) dial(ctx context.Context, target string) (*grpc.ClientConn, error) {
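The backoff guard kept in `DownloadDeployment` (only retry a download from the same endpoint after `BootstrapperDownloadRetryBackoff`) comes down to remembering the last attempt per address. A minimal, self-contained sketch of that pattern, with the type and method names chosen here purely for illustration:

```go
package sketch

import (
	"fmt"
	"time"
)

// retryGuard remembers when an endpoint was last tried so callers can
// skip endpoints that were attempted too recently.
type retryGuard struct {
	backoff      time.Duration
	lastAttempts map[string]time.Time
}

func newRetryGuard(backoff time.Duration) *retryGuard {
	return &retryGuard{backoff: backoff, lastAttempts: map[string]time.Time{}}
}

// tryMark returns an error if the address was attempted within the backoff
// window; otherwise it records the attempt and allows it.
func (g *retryGuard) tryMark(addr string) error {
	if last, ok := g.lastAttempts[addr]; ok && time.Since(last) < g.backoff {
		return fmt.Errorf("download failed too recently: %v / %v", time.Since(last), g.backoff)
	}
	g.lastAttempts[addr] = time.Now()
	return nil
}
```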
@@ -20,7 +20,6 @@ import (
 "github.com/edgelesssys/constellation/v2/debugd/internal/debugd"
 pb "github.com/edgelesssys/constellation/v2/debugd/service"
 "github.com/edgelesssys/constellation/v2/internal/constants"
-"github.com/edgelesssys/constellation/v2/internal/deploy/ssh"
 "github.com/edgelesssys/constellation/v2/internal/grpc/testdialer"
 "github.com/edgelesssys/constellation/v2/internal/logger"
 "github.com/stretchr/testify/assert"

@@ -48,12 +47,10 @@ func TestDownloadBootstrapper(t *testing.T) {
 wantFile bool
 wantSystemdAction bool
 wantDeployed bool
-wantKeys []ssh.UserKey
 }{
 "download works": {
 server: fakeDownloadServer{
 chunks: [][]byte{[]byte("test")},
-keys: []*pb.AuthorizedKey{{Username: "name", KeyValue: "key"}},
 },
 attemptedDownloads: map[string]time.Time{},
 wantChunks: [][]byte{[]byte("test")},

@@ -61,7 +58,6 @@ func TestDownloadBootstrapper(t *testing.T) {
 wantFile: true,
 wantSystemdAction: true,
 wantDeployed: true,
-wantKeys: []ssh.UserKey{{Username: "name", PublicKey: "key"}},
 },
 "second download is not attempted twice": {
 server: fakeDownloadServer{chunks: [][]byte{[]byte("test")}},

@@ -73,14 +69,6 @@ func TestDownloadBootstrapper(t *testing.T) {
 attemptedDownloads: map[string]time.Time{},
 wantDownloadErr: true,
 },
-"download key error": {
-server: fakeDownloadServer{
-chunks: [][]byte{[]byte("test")},
-downloadAuthorizedKeysErr: someErr,
-},
-attemptedDownloads: map[string]time.Time{},
-wantDownloadErr: true,
-},
 "service restart error is detected": {
 server: fakeDownloadServer{chunks: [][]byte{[]byte("test")}},
 serviceManager: stubServiceManager{systemdActionErr: someErr},

@@ -115,7 +103,7 @@ func TestDownloadBootstrapper(t *testing.T) {
 attemptedDownloads: tc.attemptedDownloads,
 }

-keys, err := download.DownloadDeployment(context.Background(), ip)
+err := download.DownloadDeployment(context.Background(), ip)

 if tc.wantDownloadErr {
 assert.Error(err)

@@ -135,7 +123,6 @@ func TestDownloadBootstrapper(t *testing.T) {
 tc.serviceManager.requests,
 )
 }
-assert.Equal(tc.wantKeys, keys)
 })
 }
 }

@@ -171,10 +158,8 @@ func (f *fakeStreamToFileWriter) WriteStream(filename string, stream bootstrappe

 // fakeDownloadServer implements DebugdServer; only fakes DownloadBootstrapper, panics on every other rpc.
 type fakeDownloadServer struct {
 chunks [][]byte
 downladErr error
-keys []*pb.AuthorizedKey
-downloadAuthorizedKeysErr error

 pb.UnimplementedDebugdServer
 }

@@ -187,7 +172,3 @@ func (s *fakeDownloadServer) DownloadBootstrapper(request *pb.DownloadBootstrapp
 }
 return s.downladErr
 }
-
-func (s *fakeDownloadServer) DownloadAuthorizedKeys(context.Context, *pb.DownloadAuthorizedKeysRequest) (*pb.DownloadAuthorizedKeysResponse, error) {
-return &pb.DownloadAuthorizedKeysResponse{Keys: s.keys}, s.downloadAuthorizedKeysErr
-}
@@ -12,7 +12,6 @@ import (
 "net"

 "github.com/edgelesssys/constellation/v2/internal/cloud/metadata"
-"github.com/edgelesssys/constellation/v2/internal/deploy/ssh"
 "github.com/edgelesssys/constellation/v2/internal/role"
 )

@@ -25,7 +24,7 @@ type providerMetadata interface {
 GetLoadBalancerEndpoint(ctx context.Context) (string, error)
 }

-// Fetcher checks the metadata service to search for instances that were set up for debugging and cloud provider specific SSH keys.
+// Fetcher checks the metadata service to search for instances that were set up for debugging.
 type Fetcher struct {
 metaAPI providerMetadata
 }

@@ -92,20 +91,3 @@ func (f *Fetcher) DiscoverLoadbalancerIP(ctx context.Context) (string, error) {

 return lbIP, nil
 }
-
-// FetchSSHKeys will query the metadata of the current instance and deploys any SSH keys found.
-func (f *Fetcher) FetchSSHKeys(ctx context.Context) ([]ssh.UserKey, error) {
-self, err := f.metaAPI.Self(ctx)
-if err != nil {
-return nil, fmt.Errorf("retrieving ssh keys from cloud provider metadata: %w", err)
-}
-
-keys := []ssh.UserKey{}
-for username, userKeys := range self.SSHKeys {
-for _, keyValue := range userKeys {
-keys = append(keys, ssh.UserKey{Username: username, PublicKey: keyValue})
-}
-}
-
-return keys, nil
-}
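The `Fetcher` keeps only its discovery role: finding instances that were set up for debugging via cloud metadata. A rough, self-contained sketch of that kind of flag-based discovery; the `instanceMetadata` fields below are illustrative and not the repository's metadata types.

```go
package sketch

// instanceMetadata is a simplified stand-in for a cloud provider's
// instance description (field names are illustrative).
type instanceMetadata struct {
	Name       string
	PrivateIPs []string
	Flags      map[string]string
}

// discoverDebugdIPs keeps the IPs of instances that carry the debugd
// metadata flag, mirroring the discovery role the Fetcher retains here.
func discoverDebugdIPs(instances []instanceMetadata, flag string) []string {
	var ips []string
	for _, inst := range instances {
		if _, ok := inst.Flags[flag]; !ok {
			continue
		}
		ips = append(ips, inst.PrivateIPs...)
	}
	return ips
}
```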
@@ -12,7 +12,6 @@ import (
 "testing"

 "github.com/edgelesssys/constellation/v2/internal/cloud/metadata"
-"github.com/edgelesssys/constellation/v2/internal/deploy/ssh"
 "github.com/edgelesssys/constellation/v2/internal/role"
 "github.com/stretchr/testify/assert"
 "github.com/stretchr/testify/require"

@@ -166,57 +165,6 @@ func TestDiscoverLoadbalancerIP(t *testing.T) {
 }
 }

-func TestFetchSSHKeys(t *testing.T) {
-err := errors.New("some err")
-
-testCases := map[string]struct {
-meta stubMetadata
-wantKeys []ssh.UserKey
-wantErr bool
-}{
-"fetch works": {
-meta: stubMetadata{
-selfRes: metadata.InstanceMetadata{
-Name: "name",
-ProviderID: "provider-id",
-SSHKeys: map[string][]string{"bob": {"ssh-rsa bobskey"}},
-},
-},
-wantKeys: []ssh.UserKey{
-{
-Username: "bob",
-PublicKey: "ssh-rsa bobskey",
-},
-},
-},
-"retrieve fails": {
-meta: stubMetadata{
-selfErr: err,
-},
-wantErr: true,
-},
-}
-
-for name, tc := range testCases {
-t.Run(name, func(t *testing.T) {
-assert := assert.New(t)
-require := require.New(t)
-
-fetcher := Fetcher{
-metaAPI: &tc.meta,
-}
-keys, err := fetcher.FetchSSHKeys(context.Background())
-
-if tc.wantErr {
-assert.Error(err)
-return
-}
-require.NoError(err)
-assert.ElementsMatch(tc.wantKeys, keys)
-})
-}
-}
-
 type stubMetadata struct {
 listRes []metadata.InstanceMetadata
 listErr error
@@ -9,7 +9,6 @@ package fallback
 import (
 "context"

-"github.com/edgelesssys/constellation/v2/internal/deploy/ssh"
 "github.com/edgelesssys/constellation/v2/internal/role"
 )

@@ -30,8 +29,3 @@ func (f Fetcher) DiscoverDebugdIPs(ctx context.Context) ([]string, error) {
 func (f Fetcher) DiscoverLoadbalancerIP(ctx context.Context) (string, error) {
 return "", nil
 }
-
-// FetchSSHKeys for fallback fetcher does not try to fetch ssh keys.
-func (f Fetcher) FetchSSHKeys(ctx context.Context) ([]ssh.UserKey, error) {
-return nil, nil
-}

@@ -27,13 +27,3 @@ func TestDiscoverDebugdIPs(t *testing.T) {
 assert.NoError(err)
 assert.Empty(ips)
 }
-
-func TestFetchSSHKeys(t *testing.T) {
-assert := assert.New(t)
-
-fetcher := Fetcher{}
-keys, err := fetcher.FetchSSHKeys(context.Background())
-
-assert.NoError(err)
-assert.Empty(keys)
-}
@@ -14,17 +14,15 @@ import (
 "time"

 "github.com/edgelesssys/constellation/v2/debugd/internal/debugd"
-"github.com/edgelesssys/constellation/v2/internal/deploy/ssh"
 "github.com/edgelesssys/constellation/v2/internal/logger"
 "github.com/edgelesssys/constellation/v2/internal/role"
 "go.uber.org/zap"
 )

-// Fetcher retrieves other debugd IPs and SSH keys from cloud provider metadata.
+// Fetcher retrieves other debugd IPs from cloud provider metadata.
 type Fetcher interface {
 Role(ctx context.Context) (role.Role, error)
 DiscoverDebugdIPs(ctx context.Context) ([]string, error)
-FetchSSHKeys(ctx context.Context) ([]ssh.UserKey, error)
 DiscoverLoadbalancerIP(ctx context.Context) (string, error)
 }

@@ -32,29 +30,24 @@ type Fetcher interface {
 type Scheduler struct {
 log *logger.Logger
 fetcher Fetcher
-ssh sshDeployer
 downloader downloader
 }

 // NewScheduler returns a new scheduler.
-func NewScheduler(log *logger.Logger, fetcher Fetcher, ssh sshDeployer, downloader downloader) *Scheduler {
+func NewScheduler(log *logger.Logger, fetcher Fetcher, downloader downloader) *Scheduler {
 return &Scheduler{
 log: log,
 fetcher: fetcher,
-ssh: ssh,
 downloader: downloader,
 }
 }

-// Start will start the loops for discovering debugd endpoints and ssh keys.
+// Start the loops for discovering debugd endpoints.
 func (s *Scheduler) Start(ctx context.Context, wg *sync.WaitGroup) {
 defer wg.Done()

 wg.Add(1)
 go s.discoveryLoop(ctx, wg)
-// TODO (stateless-ssh): re-enable once
-// ssh keys can be deployed on readonly rootfs
-// go s.sshLoop(ctx, wg)
 }

 // discoveryLoop discovers new debugd endpoints from cloud-provider metadata periodically.

@@ -91,36 +84,11 @@ func (s *Scheduler) discoveryLoop(ctx context.Context, wg *sync.WaitGroup) {
 }
 }

-// sshLoop discovers new ssh keys from cloud provider metadata periodically.
-// TODO (stateless-ssh): re-enable once ssh keys can be deployed on readonly rootfs
-// func (s *Scheduler) sshLoop(ctx context.Context, wg *sync.WaitGroup) {
-// defer wg.Done()
-
-// ticker := time.NewTicker(debugd.SSHCheckInterval)
-// defer ticker.Stop()
-// for {
-// keys, err := s.fetcher.FetchSSHKeys(ctx)
-// if err != nil {
-// s.log.With(zap.Error(err)).Errorf("Fetching SSH keys failed")
-// } else {
-// s.deploySSHKeys(ctx, keys)
-// }
-
-// select {
-// case <-ticker.C:
-// case <-ctx.Done():
-// return
-// }
-// }
-// }
-
 // downloadDeployment tries to download deployment from a list of ips and logs errors encountered.
 func (s *Scheduler) downloadDeployment(ctx context.Context, ips []string) (success bool) {
 for _, ip := range ips {
-_, err := s.downloader.DownloadDeployment(ctx, ip)
+err := s.downloader.DownloadDeployment(ctx, ip)
 if err == nil {
-// TODO (stateless-ssh): re-enable once ssh keys can be deployed on readonly rootfs
-// s.deploySSHKeys(ctx, keys)
 return true
 }
 if errors.Is(err, fs.ErrExist) {

@@ -133,22 +101,6 @@ func (s *Scheduler) downloadDeployment(ctx context.Context, ips []string) (succe
 return false
 }

-// TODO (stateless-ssh): re-enable once ssh keys can be deployed on readonly rootfs
-// deploySSHKeys tries to deploy a list of SSH keys and logs errors encountered.
-// func (s *Scheduler) deploySSHKeys(ctx context.Context, keys []ssh.UserKey) {
-// for _, key := range keys {
-// err := s.ssh.DeployAuthorizedKey(ctx, key)
-// if err != nil {
-// s.log.With(zap.Error(err), zap.Any("key", key)).Errorf("Deploying SSH key failed")
-// continue
-// }
-// }
-// }
-
 type downloader interface {
-DownloadDeployment(ctx context.Context, ip string) ([]ssh.UserKey, error)
-}
-
-type sshDeployer interface {
-DeployAuthorizedKey(ctx context.Context, sshKey ssh.UserKey) error
+DownloadDeployment(ctx context.Context, ip string) error
 }
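With the SSH loop gone, the scheduler's job reduces to discovering endpoints and trying each one until a bootstrapper download succeeds. A minimal, self-contained sketch of that loop against the slimmed-down `downloader` interface; the stub type is illustrative, not the repository's test code.

```go
package sketch

import (
	"context"
	"errors"
)

// downloader matches the interface left in the scheduler after this change.
type downloader interface {
	DownloadDeployment(ctx context.Context, ip string) error
}

// downloadFromAny tries each IP in order and reports whether any attempt succeeded.
func downloadFromAny(ctx context.Context, d downloader, ips []string) bool {
	for _, ip := range ips {
		if err := d.DownloadDeployment(ctx, ip); err == nil {
			return true
		}
	}
	return false
}

// stubDownloader records attempted IPs and fails until the second attempt (illustrative).
type stubDownloader struct{ attempts []string }

func (s *stubDownloader) DownloadDeployment(_ context.Context, ip string) error {
	s.attempts = append(s.attempts, ip)
	if len(s.attempts) < 2 {
		return errors.New("download failed")
	}
	return nil
}
```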
@@ -13,7 +13,6 @@ import (
 "testing"
 "time"

-"github.com/edgelesssys/constellation/v2/internal/deploy/ssh"
 "github.com/edgelesssys/constellation/v2/internal/logger"
 "github.com/edgelesssys/constellation/v2/internal/role"
 "github.com/stretchr/testify/assert"

@@ -29,20 +28,11 @@ func TestSchedulerStart(t *testing.T) {

 testCases := map[string]struct {
 fetcher stubFetcher
-ssh stubSSHDeployer
 downloader stubDownloader
 timeout time.Duration
-wantSSHKeys []ssh.UserKey
 wantDebugdDownloads []string
 }{
 "scheduler works and calls fetcher functions at least once": {},
-// TODO (stateless-ssh): re-enable once ssh keys can be deployed on readonly rootfs.
-// "ssh keys are fetched": {
-// fetcher: stubFetcher{
-// keys: []ssh.UserKey{{Username: "test", PublicKey: "testkey"}},
-// },
-// wantSSHKeys: []ssh.UserKey{{Username: "test", PublicKey: "testkey"}},
-// },
 "download for discovered debugd ips is started": {
 fetcher: stubFetcher{
 ips: []string{"192.0.2.1", "192.0.2.2"},

@@ -59,10 +49,6 @@ func TestSchedulerStart(t *testing.T) {
 "endpoint discovery can fail": {
 fetcher: stubFetcher{discoverErr: someErr},
 },
-// TODO (stateless-ssh): re-enable once ssh keys can be deployed on readonly rootfs.
-// "ssh key fetch can fail": {
-// fetcher: stubFetcher{fetchSSHKeysErr: someErr},
-// },
 }

 for name, tc := range testCases {

@@ -75,31 +61,23 @@ func TestSchedulerStart(t *testing.T) {
 scheduler := Scheduler{
 log: logger.NewTest(t),
 fetcher: &tc.fetcher,
-ssh: &tc.ssh,
 downloader: &tc.downloader,
 }
 wg.Add(1)
 go scheduler.Start(ctx, wg)

 wg.Wait()
-// TODO (stateless-ssh): re-enable once ssh keys can be deployed on readonly rootfs.
-// assert.Equal(tc.wantSSHKeys, tc.ssh.sshKeys)
 assert.Equal(tc.wantDebugdDownloads, tc.downloader.ips)
 assert.Greater(tc.fetcher.discoverCalls, 0)
-// TODO (stateless-ssh): re-enable once ssh keys can be deployed on readonly rootfs.
-// assert.Greater(tc.fetcher.fetchSSHKeysCalls, 0)
 })
 }
 }

 type stubFetcher struct {
 discoverCalls int
-fetchSSHKeysCalls int

 ips []string
-keys []ssh.UserKey
 discoverErr error
-fetchSSHKeysErr error
 }

 func (s *stubFetcher) Role(_ context.Context) (role.Role, error) {

@@ -111,34 +89,16 @@ func (s *stubFetcher) DiscoverDebugdIPs(ctx context.Context) ([]string, error) {
 return s.ips, s.discoverErr
 }

-func (s *stubFetcher) FetchSSHKeys(ctx context.Context) ([]ssh.UserKey, error) {
-s.fetchSSHKeysCalls++
-return s.keys, s.fetchSSHKeysErr
-}
-
 func (s *stubFetcher) DiscoverLoadbalancerIP(ctx context.Context) (string, error) {
 return "", nil
 }

-type stubSSHDeployer struct {
-sshKeys []ssh.UserKey
-
-deployErr error
-}
-
-func (s *stubSSHDeployer) DeployAuthorizedKey(ctx context.Context, sshKey ssh.UserKey) error {
-s.sshKeys = append(s.sshKeys, sshKey)
-
-return s.deployErr
-}
-
 type stubDownloader struct {
 ips []string
 downloadErr error
-keys []ssh.UserKey
 }

-func (s *stubDownloader) DownloadDeployment(ctx context.Context, ip string) ([]ssh.UserKey, error) {
+func (s *stubDownloader) DownloadDeployment(ctx context.Context, ip string) error {
 s.ips = append(s.ips, ip)
-return s.keys, s.downloadErr
+return s.downloadErr
 }
@ -21,7 +21,6 @@ import (
    "github.com/edgelesssys/constellation/v2/debugd/internal/debugd/deploy"
    pb "github.com/edgelesssys/constellation/v2/debugd/service"
    "github.com/edgelesssys/constellation/v2/internal/constants"
    "github.com/edgelesssys/constellation/v2/internal/deploy/ssh"
    "github.com/edgelesssys/constellation/v2/internal/logger"
    "go.uber.org/zap"
    "google.golang.org/grpc"
@ -30,48 +29,20 @@ import (

type debugdServer struct {
    log            *logger.Logger
    ssh            sshDeployer
    serviceManager serviceManager
    streamer       streamer
    pb.UnimplementedDebugdServer
}

// New creates a new debugdServer according to the gRPC spec.
func New(log *logger.Logger, ssh sshDeployer, serviceManager serviceManager, streamer streamer) pb.DebugdServer {
func New(log *logger.Logger, serviceManager serviceManager, streamer streamer) pb.DebugdServer {
    return &debugdServer{
        log:            log,
        ssh:            ssh,
        serviceManager: serviceManager,
        streamer:       streamer,
    }
}

// TODO (stateless-ssh): re-enable once ssh keys can be deployed on readonly rootfs.
// UploadAuthorizedKeys receives a list of authorized keys and forwards them to a channel.
//
// func (s *debugdServer) UploadAuthorizedKeys(ctx context.Context, in *pb.UploadAuthorizedKeysRequest) (*pb.UploadAuthorizedKeysResponse, error) {
//     s.log.Infof("Uploading authorized keys")
//     for _, key := range in.Keys {
//         if err := s.ssh.DeployAuthorizedKey(ctx, ssh.UserKey{Username: key.Username, PublicKey: key.KeyValue}); err != nil {
//             s.log.With(zap.Error(err)).Errorf("Uploading authorized keys failed")
//             return &pb.UploadAuthorizedKeysResponse{
//                 Status: pb.UploadAuthorizedKeysStatus_UPLOAD_AUTHORIZED_KEYS_FAILURE,
//             }, nil
//         }
//     }
//     return &pb.UploadAuthorizedKeysResponse{
//         Status: pb.UploadAuthorizedKeysStatus_UPLOAD_AUTHORIZED_KEYS_SUCCESS,
//     }, nil
// }
//
// UploadAuthorizedKeys receives a list of authorized keys and forwards them to a channel.
func (s *debugdServer) UploadAuthorizedKeys(ctx context.Context, in *pb.UploadAuthorizedKeysRequest) (*pb.UploadAuthorizedKeysResponse, error) {
    s.log.Infof("Uploading authorized keys (Disabled feature)")
    return &pb.UploadAuthorizedKeysResponse{
        Status: pb.UploadAuthorizedKeysStatus_UPLOAD_AUTHORIZED_KEYS_SUCCESS,
    }, nil
}

// UploadBootstrapper receives a bootstrapper binary in a stream of chunks and writes to a file.
func (s *debugdServer) UploadBootstrapper(stream pb.Debugd_UploadBootstrapperServer) error {
    startAction := deploy.ServiceManagerRequest{
@ -114,21 +85,6 @@ func (s *debugdServer) DownloadBootstrapper(request *pb.DownloadBootstrapperRequ
    return s.streamer.ReadStream(debugd.BootstrapperDeployFilename, stream, debugd.Chunksize, true)
}

// DownloadAuthorizedKeys streams the local authorized keys to other instances.
func (s *debugdServer) DownloadAuthorizedKeys(_ context.Context, req *pb.DownloadAuthorizedKeysRequest) (*pb.DownloadAuthorizedKeysResponse, error) {
    s.log.Infof("Sending authorized keys to other instance")

    var authKeys []*pb.AuthorizedKey
    for _, key := range s.ssh.GetAuthorizedKeys() {
        authKeys = append(authKeys, &pb.AuthorizedKey{
            Username: key.Username,
            KeyValue: key.PublicKey,
        })
    }

    return &pb.DownloadAuthorizedKeysResponse{Keys: authKeys}, nil
}

// UploadSystemServiceUnits receives systemd service units, writes them to a service file and schedules a daemon-reload.
func (s *debugdServer) UploadSystemServiceUnits(ctx context.Context, in *pb.UploadSystemdServiceUnitsRequest) (*pb.UploadSystemdServiceUnitsResponse, error) {
    s.log.Infof("Uploading systemd service units")
@ -162,11 +118,6 @@ func Start(log *logger.Logger, wg *sync.WaitGroup, serv pb.DebugdServer) {
    grpcServer.Serve(lis)
}

type sshDeployer interface {
    DeployAuthorizedKey(ctx context.Context, sshKey ssh.UserKey) error
    GetAuthorizedKeys() []ssh.UserKey
}

type serviceManager interface {
    SystemdAction(ctx context.Context, request deploy.ServiceManagerRequest) error
    WriteSystemdUnitFile(ctx context.Context, unit deploy.SystemdUnit) error
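For context, a minimal client-side sketch of driving the `UploadBootstrapper` client-streaming RPC kept above. This is not code from the commit: the helper name, chunk size, and dial options are assumptions; only the `Debugd` service, the `Chunk` message, the `UploadBootstrapperStatus` enum, and `constants.DebugdPort` appear in this change.

```go
package example

import (
    "context"
    "fmt"
    "io"
    "net"
    "strconv"

    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"

    pb "github.com/edgelesssys/constellation/v2/debugd/service"
    "github.com/edgelesssys/constellation/v2/internal/constants"
)

// uploadBootstrapper streams a bootstrapper binary to debugd in chunks and
// checks the returned status. Chunk size and credentials are assumptions.
func uploadBootstrapper(ctx context.Context, ip string, src io.Reader) error {
    conn, err := grpc.DialContext(ctx, net.JoinHostPort(ip, strconv.Itoa(constants.DebugdPort)),
        grpc.WithTransportCredentials(insecure.NewCredentials()))
    if err != nil {
        return err
    }
    defer conn.Close()

    stream, err := pb.NewDebugdClient(conn).UploadBootstrapper(ctx)
    if err != nil {
        return err
    }

    buf := make([]byte, 1024*1024) // assumed chunk size
    for {
        n, readErr := src.Read(buf)
        if n > 0 {
            // Each Chunk carries one slice of the binary.
            if err := stream.Send(&pb.Chunk{Content: buf[:n]}); err != nil {
                return err
            }
        }
        if readErr == io.EOF {
            break
        }
        if readErr != nil {
            return readErr
        }
    }

    resp, err := stream.CloseAndRecv()
    if err != nil {
        return err
    }
    if resp.Status != pb.UploadBootstrapperStatus_UPLOAD_BOOTSTRAPPER_SUCCESS {
        return fmt.Errorf("debugd returned status %s", resp.Status)
    }
    return nil
}
```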
@ -19,7 +19,6 @@ import (
    "github.com/edgelesssys/constellation/v2/debugd/internal/debugd/deploy"
    pb "github.com/edgelesssys/constellation/v2/debugd/service"
    "github.com/edgelesssys/constellation/v2/internal/constants"
    "github.com/edgelesssys/constellation/v2/internal/deploy/ssh"
    "github.com/edgelesssys/constellation/v2/internal/grpc/testdialer"
    "github.com/edgelesssys/constellation/v2/internal/logger"
    "github.com/stretchr/testify/assert"
@ -33,90 +32,10 @@ func TestMain(m *testing.M) {
    goleak.VerifyTestMain(m)
}

// func TestUploadAuthorizedKeys(t *testing.T) {
//     endpoint := "192.0.2.1:" + strconv.Itoa(constants.DebugdPort)

//     testCases := map[string]struct {
//         ssh                stubSSHDeployer
//         serviceManager     stubServiceManager
//         request            *pb.UploadAuthorizedKeysRequest
//         wantErr            bool
//         wantResponseStatus pb.UploadAuthorizedKeysStatus
//         wantKeys           []ssh.UserKey
//     }{
//         "upload authorized keys works": {
//             request: &pb.UploadAuthorizedKeysRequest{
//                 Keys: []*pb.AuthorizedKey{
//                     {
//                         Username: "testuser",
//                         KeyValue: "teskey",
//                     },
//                 },
//             },
//             wantResponseStatus: pb.UploadAuthorizedKeysStatus_UPLOAD_AUTHORIZED_KEYS_SUCCESS,
//             wantKeys: []ssh.UserKey{
//                 {
//                     Username:  "testuser",
//                     PublicKey: "teskey",
//                 },
//             },
//         },
//         "deploy fails": {
//             request: &pb.UploadAuthorizedKeysRequest{
//                 Keys: []*pb.AuthorizedKey{
//                     {
//                         Username: "testuser",
//                         KeyValue: "teskey",
//                     },
//                 },
//             },
//             ssh:                stubSSHDeployer{deployErr: errors.New("ssh key deployment error")},
//             wantResponseStatus: pb.UploadAuthorizedKeysStatus_UPLOAD_AUTHORIZED_KEYS_FAILURE,
//             wantKeys: []ssh.UserKey{
//                 {
//                     Username:  "testuser",
//                     PublicKey: "teskey",
//                 },
//             },
//         },
//     }

//     for name, tc := range testCases {
//         t.Run(name, func(t *testing.T) {
//             assert := assert.New(t)
//             require := require.New(t)

//             serv := debugdServer{
//                 log:            logger.NewTest(t),
//                 ssh:            &tc.ssh,
//                 serviceManager: &tc.serviceManager,
//                 streamer:       &fakeStreamer{},
//             }

//             grpcServ, conn, err := setupServerWithConn(endpoint, &serv)
//             require.NoError(err)
//             defer conn.Close()
//             client := pb.NewDebugdClient(conn)
//             resp, err := client.UploadAuthorizedKeys(context.Background(), tc.request)

//             grpcServ.GracefulStop()

//             if tc.wantErr {
//                 assert.Error(err)
//                 return
//             }
//             require.NoError(err)
//             assert.Equal(tc.wantResponseStatus, resp.Status)
//             assert.ElementsMatch(tc.ssh.sshKeys, tc.wantKeys)
//         })
//     }
// }

func TestUploadBootstrapper(t *testing.T) {
    endpoint := "192.0.2.1:" + strconv.Itoa(constants.DebugdPort)

    testCases := map[string]struct {
        ssh            stubSSHDeployer
        serviceManager stubServiceManager
        streamer       fakeStreamer
        uploadChunks   [][]byte
@ -164,7 +83,6 @@ func TestUploadBootstrapper(t *testing.T) {

            serv := debugdServer{
                log:            logger.NewTest(t),
                ssh:            &tc.ssh,
                serviceManager: &tc.serviceManager,
                streamer:       &tc.streamer,
            }
@ -201,7 +119,6 @@ func TestDownloadBootstrapper(t *testing.T) {
    endpoint := "192.0.2.1:" + strconv.Itoa(constants.DebugdPort)

    testCases := map[string]struct {
        ssh            stubSSHDeployer
        serviceManager stubServiceManager
        request        *pb.DownloadBootstrapperRequest
        streamer       fakeStreamer
@ -236,7 +153,6 @@ func TestDownloadBootstrapper(t *testing.T) {

            serv := debugdServer{
                log:            logger.NewTest(t),
                ssh:            &tc.ssh,
                serviceManager: &tc.serviceManager,
                streamer:       &tc.streamer,
            }
@ -261,44 +177,10 @@ func TestDownloadBootstrapper(t *testing.T) {
    }
}

func TestDownloadAuthorizedKeys(t *testing.T) {
    assert := assert.New(t)
    require := require.New(t)

    endpoint := "192.0.2.1:" + strconv.Itoa(constants.DebugdPort)
    deployer := &stubSSHDeployer{
        sshKeys: []ssh.UserKey{
            {Username: "test1", PublicKey: "foo"},
            {Username: "test2", PublicKey: "bar"},
        },
    }

    serv := debugdServer{
        log: logger.NewTest(t),
        ssh: deployer,
    }

    grpcServ, conn, err := setupServerWithConn(endpoint, &serv)
    require.NoError(err)
    defer conn.Close()
    defer grpcServ.GracefulStop()
    client := pb.NewDebugdClient(conn)

    resp, err := client.DownloadAuthorizedKeys(context.Background(), &pb.DownloadAuthorizedKeysRequest{})

    assert.NoError(err)
    wantKeys := []*pb.AuthorizedKey{
        {Username: "test1", KeyValue: "foo"},
        {Username: "test2", KeyValue: "bar"},
    }
    assert.ElementsMatch(wantKeys, resp.Keys)
}

func TestUploadSystemServiceUnits(t *testing.T) {
    endpoint := "192.0.2.1:" + strconv.Itoa(constants.DebugdPort)

    testCases := map[string]struct {
        ssh            stubSSHDeployer
        serviceManager stubServiceManager
        request        *pb.UploadSystemdServiceUnitsRequest
        wantErr        bool
@ -351,7 +233,6 @@ func TestUploadSystemServiceUnits(t *testing.T) {

            serv := debugdServer{
                log:            logger.NewTest(t),
                ssh:            &tc.ssh,
                serviceManager: &tc.serviceManager,
                streamer:       &fakeStreamer{},
            }
@ -375,22 +256,6 @@ func TestUploadSystemServiceUnits(t *testing.T) {
    }
}

type stubSSHDeployer struct {
    sshKeys []ssh.UserKey

    deployErr error
}

func (s *stubSSHDeployer) DeployAuthorizedKey(ctx context.Context, sshKey ssh.UserKey) error {
    s.sshKeys = append(s.sshKeys, sshKey)

    return s.deployErr
}

func (s *stubSSHDeployer) GetAuthorizedKeys() []ssh.UserKey {
    return s.sshKeys
}

type stubServiceManager struct {
    requests  []deploy.ServiceManagerRequest
    unitFiles []deploy.SystemdUnit
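For orientation, a hedged sketch of what a caller of the retained `UploadSystemServiceUnits` RPC might look like. The helper name, the unit name, and its contents are made up for illustration; only the `Debugd` client, the `ServiceUnit` message (with `name` and `contents` fields), and the request/response/status types come from this change.

```go
package example

import (
    "context"
    "fmt"

    "google.golang.org/grpc"

    pb "github.com/edgelesssys/constellation/v2/debugd/service"
)

// uploadUnit sends a single (hypothetical) systemd unit to debugd over an
// already established gRPC connection and checks the reported status.
func uploadUnit(ctx context.Context, conn *grpc.ClientConn) error {
    resp, err := pb.NewDebugdClient(conn).UploadSystemServiceUnits(ctx, &pb.UploadSystemdServiceUnitsRequest{
        Units: []*pb.ServiceUnit{{
            Name:     "hello.service", // hypothetical unit name
            Contents: "[Unit]\nDescription=hello\n\n[Service]\nExecStart=/usr/bin/true\n",
        }},
    })
    if err != nil {
        return err
    }
    if resp.Status != pb.UploadSystemdServiceUnitsStatus_UPLOAD_SYSTEMD_SERVICE_UNITS_SUCCESS {
        return fmt.Errorf("unit upload failed with status %s", resp.Status)
    }
    return nil
}
```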
@ -20,52 +20,6 @@ const (
    _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

type UploadAuthorizedKeysStatus int32

const (
    UploadAuthorizedKeysStatus_UPLOAD_AUTHORIZED_KEYS_SUCCESS UploadAuthorizedKeysStatus = 0
    UploadAuthorizedKeysStatus_UPLOAD_AUTHORIZED_KEYS_FAILURE UploadAuthorizedKeysStatus = 1
)

// Enum value maps for UploadAuthorizedKeysStatus.
var (
    UploadAuthorizedKeysStatus_name = map[int32]string{
        0: "UPLOAD_AUTHORIZED_KEYS_SUCCESS",
        1: "UPLOAD_AUTHORIZED_KEYS_FAILURE",
    }
    UploadAuthorizedKeysStatus_value = map[string]int32{
        "UPLOAD_AUTHORIZED_KEYS_SUCCESS": 0,
        "UPLOAD_AUTHORIZED_KEYS_FAILURE": 1,
    }
)

func (x UploadAuthorizedKeysStatus) Enum() *UploadAuthorizedKeysStatus {
    p := new(UploadAuthorizedKeysStatus)
    *p = x
    return p
}

func (x UploadAuthorizedKeysStatus) String() string {
    return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}

func (UploadAuthorizedKeysStatus) Descriptor() protoreflect.EnumDescriptor {
    return file_debugd_proto_enumTypes[0].Descriptor()
}

func (UploadAuthorizedKeysStatus) Type() protoreflect.EnumType {
    return &file_debugd_proto_enumTypes[0]
}

func (x UploadAuthorizedKeysStatus) Number() protoreflect.EnumNumber {
    return protoreflect.EnumNumber(x)
}

// Deprecated: Use UploadAuthorizedKeysStatus.Descriptor instead.
func (UploadAuthorizedKeysStatus) EnumDescriptor() ([]byte, []int) {
    return file_debugd_proto_rawDescGZIP(), []int{0}
}

type UploadBootstrapperStatus int32

const (
@ -102,11 +56,11 @@ func (x UploadBootstrapperStatus) String() string {
}

func (UploadBootstrapperStatus) Descriptor() protoreflect.EnumDescriptor {
    return file_debugd_proto_enumTypes[1].Descriptor()
    return file_debugd_proto_enumTypes[0].Descriptor()
}

func (UploadBootstrapperStatus) Type() protoreflect.EnumType {
    return &file_debugd_proto_enumTypes[1]
    return &file_debugd_proto_enumTypes[0]
}

func (x UploadBootstrapperStatus) Number() protoreflect.EnumNumber {
@ -115,7 +69,7 @@ func (x UploadBootstrapperStatus) Number() protoreflect.EnumNumber {

// Deprecated: Use UploadBootstrapperStatus.Descriptor instead.
func (UploadBootstrapperStatus) EnumDescriptor() ([]byte, []int) {
    return file_debugd_proto_rawDescGZIP(), []int{1}
    return file_debugd_proto_rawDescGZIP(), []int{0}
}

type UploadSystemdServiceUnitsStatus int32

@ -148,11 +102,11 @@ func (x UploadSystemdServiceUnitsStatus) String() string {
}

func (UploadSystemdServiceUnitsStatus) Descriptor() protoreflect.EnumDescriptor {
    return file_debugd_proto_enumTypes[2].Descriptor()
    return file_debugd_proto_enumTypes[1].Descriptor()
}

func (UploadSystemdServiceUnitsStatus) Type() protoreflect.EnumType {
    return &file_debugd_proto_enumTypes[2]
    return &file_debugd_proto_enumTypes[1]
}

func (x UploadSystemdServiceUnitsStatus) Number() protoreflect.EnumNumber {
@ -161,7 +115,7 @@ func (x UploadSystemdServiceUnitsStatus) Number() protoreflect.EnumNumber {

// Deprecated: Use UploadSystemdServiceUnitsStatus.Descriptor instead.
func (UploadSystemdServiceUnitsStatus) EnumDescriptor() ([]byte, []int) {
    return file_debugd_proto_rawDescGZIP(), []int{2}
    return file_debugd_proto_rawDescGZIP(), []int{1}
}

type DownloadBootstrapperRequest struct {
@ -202,240 +156,6 @@ func (*DownloadBootstrapperRequest) Descriptor() ([]byte, []int) {
    return file_debugd_proto_rawDescGZIP(), []int{0}
}

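As a side note, a small sketch of how the generated enum helpers above behave from a caller's perspective. The surrounding package and the use of fmt are assumptions; the constant and type names follow the protoc-gen-go naming visible in this file.

```go
package example

import (
    "fmt"

    pb "github.com/edgelesssys/constellation/v2/debugd/service"
)

func printStatus() {
    // String() resolves the value through the enum descriptor, so the
    // protobuf name is printed; the numeric value is the declared enum number.
    s := pb.UploadBootstrapperStatus_UPLOAD_BOOTSTRAPPER_SUCCESS
    fmt.Println(s.String(), int32(s)) // UPLOAD_BOOTSTRAPPER_SUCCESS 0
}
```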
type DownloadAuthorizedKeysRequest struct {
|
|
||||||
state protoimpl.MessageState
|
|
||||||
sizeCache protoimpl.SizeCache
|
|
||||||
unknownFields protoimpl.UnknownFields
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *DownloadAuthorizedKeysRequest) Reset() {
|
|
||||||
*x = DownloadAuthorizedKeysRequest{}
|
|
||||||
if protoimpl.UnsafeEnabled {
|
|
||||||
mi := &file_debugd_proto_msgTypes[1]
|
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *DownloadAuthorizedKeysRequest) String() string {
|
|
||||||
return protoimpl.X.MessageStringOf(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*DownloadAuthorizedKeysRequest) ProtoMessage() {}
|
|
||||||
|
|
||||||
func (x *DownloadAuthorizedKeysRequest) ProtoReflect() protoreflect.Message {
|
|
||||||
mi := &file_debugd_proto_msgTypes[1]
|
|
||||||
if protoimpl.UnsafeEnabled && x != nil {
|
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
||||||
if ms.LoadMessageInfo() == nil {
|
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
return ms
|
|
||||||
}
|
|
||||||
return mi.MessageOf(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deprecated: Use DownloadAuthorizedKeysRequest.ProtoReflect.Descriptor instead.
|
|
||||||
func (*DownloadAuthorizedKeysRequest) Descriptor() ([]byte, []int) {
|
|
||||||
return file_debugd_proto_rawDescGZIP(), []int{1}
|
|
||||||
}
|
|
||||||
|
|
||||||
type DownloadAuthorizedKeysResponse struct {
|
|
||||||
state protoimpl.MessageState
|
|
||||||
sizeCache protoimpl.SizeCache
|
|
||||||
unknownFields protoimpl.UnknownFields
|
|
||||||
|
|
||||||
Keys []*AuthorizedKey `protobuf:"bytes,1,rep,name=keys,proto3" json:"keys,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *DownloadAuthorizedKeysResponse) Reset() {
|
|
||||||
*x = DownloadAuthorizedKeysResponse{}
|
|
||||||
if protoimpl.UnsafeEnabled {
|
|
||||||
mi := &file_debugd_proto_msgTypes[2]
|
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *DownloadAuthorizedKeysResponse) String() string {
|
|
||||||
return protoimpl.X.MessageStringOf(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*DownloadAuthorizedKeysResponse) ProtoMessage() {}
|
|
||||||
|
|
||||||
func (x *DownloadAuthorizedKeysResponse) ProtoReflect() protoreflect.Message {
|
|
||||||
mi := &file_debugd_proto_msgTypes[2]
|
|
||||||
if protoimpl.UnsafeEnabled && x != nil {
|
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
||||||
if ms.LoadMessageInfo() == nil {
|
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
return ms
|
|
||||||
}
|
|
||||||
return mi.MessageOf(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deprecated: Use DownloadAuthorizedKeysResponse.ProtoReflect.Descriptor instead.
|
|
||||||
func (*DownloadAuthorizedKeysResponse) Descriptor() ([]byte, []int) {
|
|
||||||
return file_debugd_proto_rawDescGZIP(), []int{2}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *DownloadAuthorizedKeysResponse) GetKeys() []*AuthorizedKey {
|
|
||||||
if x != nil {
|
|
||||||
return x.Keys
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type AuthorizedKey struct {
|
|
||||||
state protoimpl.MessageState
|
|
||||||
sizeCache protoimpl.SizeCache
|
|
||||||
unknownFields protoimpl.UnknownFields
|
|
||||||
|
|
||||||
Username string `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"`
|
|
||||||
KeyValue string `protobuf:"bytes,2,opt,name=key_value,json=keyValue,proto3" json:"key_value,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *AuthorizedKey) Reset() {
|
|
||||||
*x = AuthorizedKey{}
|
|
||||||
if protoimpl.UnsafeEnabled {
|
|
||||||
mi := &file_debugd_proto_msgTypes[3]
|
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *AuthorizedKey) String() string {
|
|
||||||
return protoimpl.X.MessageStringOf(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*AuthorizedKey) ProtoMessage() {}
|
|
||||||
|
|
||||||
func (x *AuthorizedKey) ProtoReflect() protoreflect.Message {
|
|
||||||
mi := &file_debugd_proto_msgTypes[3]
|
|
||||||
if protoimpl.UnsafeEnabled && x != nil {
|
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
||||||
if ms.LoadMessageInfo() == nil {
|
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
return ms
|
|
||||||
}
|
|
||||||
return mi.MessageOf(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deprecated: Use AuthorizedKey.ProtoReflect.Descriptor instead.
|
|
||||||
func (*AuthorizedKey) Descriptor() ([]byte, []int) {
|
|
||||||
return file_debugd_proto_rawDescGZIP(), []int{3}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *AuthorizedKey) GetUsername() string {
|
|
||||||
if x != nil {
|
|
||||||
return x.Username
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *AuthorizedKey) GetKeyValue() string {
|
|
||||||
if x != nil {
|
|
||||||
return x.KeyValue
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
type UploadAuthorizedKeysRequest struct {
|
|
||||||
state protoimpl.MessageState
|
|
||||||
sizeCache protoimpl.SizeCache
|
|
||||||
unknownFields protoimpl.UnknownFields
|
|
||||||
|
|
||||||
Keys []*AuthorizedKey `protobuf:"bytes,1,rep,name=keys,proto3" json:"keys,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *UploadAuthorizedKeysRequest) Reset() {
|
|
||||||
*x = UploadAuthorizedKeysRequest{}
|
|
||||||
if protoimpl.UnsafeEnabled {
|
|
||||||
mi := &file_debugd_proto_msgTypes[4]
|
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *UploadAuthorizedKeysRequest) String() string {
|
|
||||||
return protoimpl.X.MessageStringOf(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*UploadAuthorizedKeysRequest) ProtoMessage() {}
|
|
||||||
|
|
||||||
func (x *UploadAuthorizedKeysRequest) ProtoReflect() protoreflect.Message {
|
|
||||||
mi := &file_debugd_proto_msgTypes[4]
|
|
||||||
if protoimpl.UnsafeEnabled && x != nil {
|
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
||||||
if ms.LoadMessageInfo() == nil {
|
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
return ms
|
|
||||||
}
|
|
||||||
return mi.MessageOf(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deprecated: Use UploadAuthorizedKeysRequest.ProtoReflect.Descriptor instead.
|
|
||||||
func (*UploadAuthorizedKeysRequest) Descriptor() ([]byte, []int) {
|
|
||||||
return file_debugd_proto_rawDescGZIP(), []int{4}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *UploadAuthorizedKeysRequest) GetKeys() []*AuthorizedKey {
|
|
||||||
if x != nil {
|
|
||||||
return x.Keys
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type UploadAuthorizedKeysResponse struct {
|
|
||||||
state protoimpl.MessageState
|
|
||||||
sizeCache protoimpl.SizeCache
|
|
||||||
unknownFields protoimpl.UnknownFields
|
|
||||||
|
|
||||||
Status UploadAuthorizedKeysStatus `protobuf:"varint,1,opt,name=status,proto3,enum=debugd.UploadAuthorizedKeysStatus" json:"status,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *UploadAuthorizedKeysResponse) Reset() {
|
|
||||||
*x = UploadAuthorizedKeysResponse{}
|
|
||||||
if protoimpl.UnsafeEnabled {
|
|
||||||
mi := &file_debugd_proto_msgTypes[5]
|
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *UploadAuthorizedKeysResponse) String() string {
|
|
||||||
return protoimpl.X.MessageStringOf(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*UploadAuthorizedKeysResponse) ProtoMessage() {}
|
|
||||||
|
|
||||||
func (x *UploadAuthorizedKeysResponse) ProtoReflect() protoreflect.Message {
|
|
||||||
mi := &file_debugd_proto_msgTypes[5]
|
|
||||||
if protoimpl.UnsafeEnabled && x != nil {
|
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
||||||
if ms.LoadMessageInfo() == nil {
|
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
return ms
|
|
||||||
}
|
|
||||||
return mi.MessageOf(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deprecated: Use UploadAuthorizedKeysResponse.ProtoReflect.Descriptor instead.
|
|
||||||
func (*UploadAuthorizedKeysResponse) Descriptor() ([]byte, []int) {
|
|
||||||
return file_debugd_proto_rawDescGZIP(), []int{5}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *UploadAuthorizedKeysResponse) GetStatus() UploadAuthorizedKeysStatus {
|
|
||||||
if x != nil {
|
|
||||||
return x.Status
|
|
||||||
}
|
|
||||||
return UploadAuthorizedKeysStatus_UPLOAD_AUTHORIZED_KEYS_SUCCESS
|
|
||||||
}
|
|
||||||
|
|
||||||
type Chunk struct {
|
type Chunk struct {
|
||||||
state protoimpl.MessageState
|
state protoimpl.MessageState
|
||||||
sizeCache protoimpl.SizeCache
|
sizeCache protoimpl.SizeCache
|
||||||
|
@ -447,7 +167,7 @@ type Chunk struct {
|
||||||
func (x *Chunk) Reset() {
|
func (x *Chunk) Reset() {
|
||||||
*x = Chunk{}
|
*x = Chunk{}
|
||||||
if protoimpl.UnsafeEnabled {
|
if protoimpl.UnsafeEnabled {
|
||||||
mi := &file_debugd_proto_msgTypes[6]
|
mi := &file_debugd_proto_msgTypes[1]
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
ms.StoreMessageInfo(mi)
|
ms.StoreMessageInfo(mi)
|
||||||
}
|
}
|
||||||
|
@ -460,7 +180,7 @@ func (x *Chunk) String() string {
|
||||||
func (*Chunk) ProtoMessage() {}
|
func (*Chunk) ProtoMessage() {}
|
||||||
|
|
||||||
func (x *Chunk) ProtoReflect() protoreflect.Message {
|
func (x *Chunk) ProtoReflect() protoreflect.Message {
|
||||||
mi := &file_debugd_proto_msgTypes[6]
|
mi := &file_debugd_proto_msgTypes[1]
|
||||||
if protoimpl.UnsafeEnabled && x != nil {
|
if protoimpl.UnsafeEnabled && x != nil {
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
if ms.LoadMessageInfo() == nil {
|
if ms.LoadMessageInfo() == nil {
|
||||||
|
@ -473,7 +193,7 @@ func (x *Chunk) ProtoReflect() protoreflect.Message {
|
||||||
|
|
||||||
// Deprecated: Use Chunk.ProtoReflect.Descriptor instead.
|
// Deprecated: Use Chunk.ProtoReflect.Descriptor instead.
|
||||||
func (*Chunk) Descriptor() ([]byte, []int) {
|
func (*Chunk) Descriptor() ([]byte, []int) {
|
||||||
return file_debugd_proto_rawDescGZIP(), []int{6}
|
return file_debugd_proto_rawDescGZIP(), []int{1}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (x *Chunk) GetContent() []byte {
|
func (x *Chunk) GetContent() []byte {
|
||||||
|
@ -494,7 +214,7 @@ type UploadBootstrapperResponse struct {
|
||||||
func (x *UploadBootstrapperResponse) Reset() {
|
func (x *UploadBootstrapperResponse) Reset() {
|
||||||
*x = UploadBootstrapperResponse{}
|
*x = UploadBootstrapperResponse{}
|
||||||
if protoimpl.UnsafeEnabled {
|
if protoimpl.UnsafeEnabled {
|
||||||
mi := &file_debugd_proto_msgTypes[7]
|
mi := &file_debugd_proto_msgTypes[2]
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
ms.StoreMessageInfo(mi)
|
ms.StoreMessageInfo(mi)
|
||||||
}
|
}
|
||||||
|
@ -507,7 +227,7 @@ func (x *UploadBootstrapperResponse) String() string {
|
||||||
func (*UploadBootstrapperResponse) ProtoMessage() {}
|
func (*UploadBootstrapperResponse) ProtoMessage() {}
|
||||||
|
|
||||||
func (x *UploadBootstrapperResponse) ProtoReflect() protoreflect.Message {
|
func (x *UploadBootstrapperResponse) ProtoReflect() protoreflect.Message {
|
||||||
mi := &file_debugd_proto_msgTypes[7]
|
mi := &file_debugd_proto_msgTypes[2]
|
||||||
if protoimpl.UnsafeEnabled && x != nil {
|
if protoimpl.UnsafeEnabled && x != nil {
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
if ms.LoadMessageInfo() == nil {
|
if ms.LoadMessageInfo() == nil {
|
||||||
|
@ -520,7 +240,7 @@ func (x *UploadBootstrapperResponse) ProtoReflect() protoreflect.Message {
|
||||||
|
|
||||||
// Deprecated: Use UploadBootstrapperResponse.ProtoReflect.Descriptor instead.
|
// Deprecated: Use UploadBootstrapperResponse.ProtoReflect.Descriptor instead.
|
||||||
func (*UploadBootstrapperResponse) Descriptor() ([]byte, []int) {
|
func (*UploadBootstrapperResponse) Descriptor() ([]byte, []int) {
|
||||||
return file_debugd_proto_rawDescGZIP(), []int{7}
|
return file_debugd_proto_rawDescGZIP(), []int{2}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (x *UploadBootstrapperResponse) GetStatus() UploadBootstrapperStatus {
|
func (x *UploadBootstrapperResponse) GetStatus() UploadBootstrapperStatus {
|
||||||
|
@ -542,7 +262,7 @@ type ServiceUnit struct {
|
||||||
func (x *ServiceUnit) Reset() {
|
func (x *ServiceUnit) Reset() {
|
||||||
*x = ServiceUnit{}
|
*x = ServiceUnit{}
|
||||||
if protoimpl.UnsafeEnabled {
|
if protoimpl.UnsafeEnabled {
|
||||||
mi := &file_debugd_proto_msgTypes[8]
|
mi := &file_debugd_proto_msgTypes[3]
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
ms.StoreMessageInfo(mi)
|
ms.StoreMessageInfo(mi)
|
||||||
}
|
}
|
||||||
|
@ -555,7 +275,7 @@ func (x *ServiceUnit) String() string {
|
||||||
func (*ServiceUnit) ProtoMessage() {}
|
func (*ServiceUnit) ProtoMessage() {}
|
||||||
|
|
||||||
func (x *ServiceUnit) ProtoReflect() protoreflect.Message {
|
func (x *ServiceUnit) ProtoReflect() protoreflect.Message {
|
||||||
mi := &file_debugd_proto_msgTypes[8]
|
mi := &file_debugd_proto_msgTypes[3]
|
||||||
if protoimpl.UnsafeEnabled && x != nil {
|
if protoimpl.UnsafeEnabled && x != nil {
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
if ms.LoadMessageInfo() == nil {
|
if ms.LoadMessageInfo() == nil {
|
||||||
|
@ -568,7 +288,7 @@ func (x *ServiceUnit) ProtoReflect() protoreflect.Message {
|
||||||
|
|
||||||
// Deprecated: Use ServiceUnit.ProtoReflect.Descriptor instead.
|
// Deprecated: Use ServiceUnit.ProtoReflect.Descriptor instead.
|
||||||
func (*ServiceUnit) Descriptor() ([]byte, []int) {
|
func (*ServiceUnit) Descriptor() ([]byte, []int) {
|
||||||
return file_debugd_proto_rawDescGZIP(), []int{8}
|
return file_debugd_proto_rawDescGZIP(), []int{3}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (x *ServiceUnit) GetName() string {
|
func (x *ServiceUnit) GetName() string {
|
||||||
|
@ -596,7 +316,7 @@ type UploadSystemdServiceUnitsRequest struct {
|
||||||
func (x *UploadSystemdServiceUnitsRequest) Reset() {
|
func (x *UploadSystemdServiceUnitsRequest) Reset() {
|
||||||
*x = UploadSystemdServiceUnitsRequest{}
|
*x = UploadSystemdServiceUnitsRequest{}
|
||||||
if protoimpl.UnsafeEnabled {
|
if protoimpl.UnsafeEnabled {
|
||||||
mi := &file_debugd_proto_msgTypes[9]
|
mi := &file_debugd_proto_msgTypes[4]
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
ms.StoreMessageInfo(mi)
|
ms.StoreMessageInfo(mi)
|
||||||
}
|
}
|
||||||
|
@ -609,7 +329,7 @@ func (x *UploadSystemdServiceUnitsRequest) String() string {
|
||||||
func (*UploadSystemdServiceUnitsRequest) ProtoMessage() {}
|
func (*UploadSystemdServiceUnitsRequest) ProtoMessage() {}
|
||||||
|
|
||||||
func (x *UploadSystemdServiceUnitsRequest) ProtoReflect() protoreflect.Message {
|
func (x *UploadSystemdServiceUnitsRequest) ProtoReflect() protoreflect.Message {
|
||||||
mi := &file_debugd_proto_msgTypes[9]
|
mi := &file_debugd_proto_msgTypes[4]
|
||||||
if protoimpl.UnsafeEnabled && x != nil {
|
if protoimpl.UnsafeEnabled && x != nil {
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
if ms.LoadMessageInfo() == nil {
|
if ms.LoadMessageInfo() == nil {
|
||||||
|
@ -622,7 +342,7 @@ func (x *UploadSystemdServiceUnitsRequest) ProtoReflect() protoreflect.Message {
|
||||||
|
|
||||||
// Deprecated: Use UploadSystemdServiceUnitsRequest.ProtoReflect.Descriptor instead.
|
// Deprecated: Use UploadSystemdServiceUnitsRequest.ProtoReflect.Descriptor instead.
|
||||||
func (*UploadSystemdServiceUnitsRequest) Descriptor() ([]byte, []int) {
|
func (*UploadSystemdServiceUnitsRequest) Descriptor() ([]byte, []int) {
|
||||||
return file_debugd_proto_rawDescGZIP(), []int{9}
|
return file_debugd_proto_rawDescGZIP(), []int{4}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (x *UploadSystemdServiceUnitsRequest) GetUnits() []*ServiceUnit {
|
func (x *UploadSystemdServiceUnitsRequest) GetUnits() []*ServiceUnit {
|
||||||
|
@ -643,7 +363,7 @@ type UploadSystemdServiceUnitsResponse struct {
|
||||||
func (x *UploadSystemdServiceUnitsResponse) Reset() {
|
func (x *UploadSystemdServiceUnitsResponse) Reset() {
|
||||||
*x = UploadSystemdServiceUnitsResponse{}
|
*x = UploadSystemdServiceUnitsResponse{}
|
||||||
if protoimpl.UnsafeEnabled {
|
if protoimpl.UnsafeEnabled {
|
||||||
mi := &file_debugd_proto_msgTypes[10]
|
mi := &file_debugd_proto_msgTypes[5]
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
ms.StoreMessageInfo(mi)
|
ms.StoreMessageInfo(mi)
|
||||||
}
|
}
|
||||||
|
@ -656,7 +376,7 @@ func (x *UploadSystemdServiceUnitsResponse) String() string {
|
||||||
func (*UploadSystemdServiceUnitsResponse) ProtoMessage() {}
|
func (*UploadSystemdServiceUnitsResponse) ProtoMessage() {}
|
||||||
|
|
||||||
func (x *UploadSystemdServiceUnitsResponse) ProtoReflect() protoreflect.Message {
|
func (x *UploadSystemdServiceUnitsResponse) ProtoReflect() protoreflect.Message {
|
||||||
mi := &file_debugd_proto_msgTypes[10]
|
mi := &file_debugd_proto_msgTypes[5]
|
||||||
if protoimpl.UnsafeEnabled && x != nil {
|
if protoimpl.UnsafeEnabled && x != nil {
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
if ms.LoadMessageInfo() == nil {
|
if ms.LoadMessageInfo() == nil {
|
||||||
|
@ -669,7 +389,7 @@ func (x *UploadSystemdServiceUnitsResponse) ProtoReflect() protoreflect.Message
|
||||||
|
|
||||||
// Deprecated: Use UploadSystemdServiceUnitsResponse.ProtoReflect.Descriptor instead.
|
// Deprecated: Use UploadSystemdServiceUnitsResponse.ProtoReflect.Descriptor instead.
|
||||||
func (*UploadSystemdServiceUnitsResponse) Descriptor() ([]byte, []int) {
|
func (*UploadSystemdServiceUnitsResponse) Descriptor() ([]byte, []int) {
|
||||||
return file_debugd_proto_rawDescGZIP(), []int{10}
|
return file_debugd_proto_rawDescGZIP(), []int{5}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (x *UploadSystemdServiceUnitsResponse) GetStatus() UploadSystemdServiceUnitsStatus {
|
func (x *UploadSystemdServiceUnitsResponse) GetStatus() UploadSystemdServiceUnitsStatus {
|
||||||
|
@ -685,111 +405,70 @@ var file_debugd_proto_rawDesc = []byte{
|
||||||
0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06,
|
0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06,
|
||||||
0x64, 0x65, 0x62, 0x75, 0x67, 0x64, 0x22, 0x1d, 0x0a, 0x1b, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f,
|
0x64, 0x65, 0x62, 0x75, 0x67, 0x64, 0x22, 0x1d, 0x0a, 0x1b, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f,
|
||||||
0x61, 0x64, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x52, 0x65,
|
0x61, 0x64, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x52, 0x65,
|
||||||
0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x1f, 0x0a, 0x1d, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61,
|
0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x21, 0x0a, 0x05, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x12, 0x18,
|
||||||
0x64, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x52,
|
0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52,
|
||||||
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x4b, 0x0a, 0x1e, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f,
|
0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x22, 0x56, 0x0a, 0x1a, 0x55, 0x70, 0x6c, 0x6f,
|
||||||
0x61, 0x64, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73,
|
0x61, 0x64, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x52, 0x65,
|
||||||
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x29, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73,
|
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
|
||||||
0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x64, 0x2e,
|
0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x64, 0x2e,
|
||||||
0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x52, 0x04, 0x6b,
|
0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x70,
|
||||||
0x65, 0x79, 0x73, 0x22, 0x48, 0x0a, 0x0d, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65,
|
0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
|
||||||
0x64, 0x4b, 0x65, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65,
|
0x22, 0x3d, 0x0a, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x6e, 0x69, 0x74, 0x12,
|
||||||
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65,
|
0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
|
||||||
0x12, 0x1b, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
|
0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, 0x18,
|
||||||
0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x48, 0x0a,
|
0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, 0x22,
|
||||||
0x1b, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65,
|
0x4d, 0x0a, 0x20, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x64,
|
||||||
0x64, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x29, 0x0a, 0x04,
|
0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75,
|
||||||
0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x64, 0x65, 0x62,
|
0x65, 0x73, 0x74, 0x12, 0x29, 0x0a, 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03,
|
||||||
0x75, 0x67, 0x64, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x4b, 0x65,
|
0x28, 0x0b, 0x32, 0x13, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x64, 0x2e, 0x53, 0x65, 0x72, 0x76,
|
||||||
0x79, 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x22, 0x5a, 0x0a, 0x1c, 0x55, 0x70, 0x6c, 0x6f, 0x61,
|
0x69, 0x63, 0x65, 0x55, 0x6e, 0x69, 0x74, 0x52, 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x22, 0x64,
|
||||||
0x64, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x52,
|
0x0a, 0x21, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x64, 0x53,
|
||||||
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75,
|
0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
|
||||||
0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x64,
|
0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20,
|
||||||
0x2e, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65,
|
0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x64, 0x2e, 0x55, 0x70, 0x6c,
|
||||||
0x64, 0x4b, 0x65, 0x79, 0x73, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61,
|
|
||||||
0x74, 0x75, 0x73, 0x22, 0x21, 0x0a, 0x05, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x12, 0x18, 0x0a, 0x07,
|
|
||||||
0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63,
|
|
||||||
0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x22, 0x56, 0x0a, 0x1a, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64,
|
|
||||||
0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70,
|
|
||||||
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01,
|
|
||||||
0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x64, 0x2e, 0x55, 0x70,
|
|
||||||
0x6c, 0x6f, 0x61, 0x64, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72,
|
|
||||||
0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x3d,
|
|
||||||
0x0a, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x6e, 0x69, 0x74, 0x12, 0x12, 0x0a,
|
|
||||||
0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d,
|
|
||||||
0x65, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20,
|
|
||||||
0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x4d, 0x0a,
|
|
||||||
0x20, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x64, 0x53, 0x65,
|
|
||||||
0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
|
|
||||||
0x74, 0x12, 0x29, 0x0a, 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
|
|
||||||
0x32, 0x13, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x64, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
|
|
||||||
0x65, 0x55, 0x6e, 0x69, 0x74, 0x52, 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x22, 0x64, 0x0a, 0x21,
|
|
||||||
0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x64, 0x53, 0x65, 0x72,
|
|
||||||
0x76, 0x69, 0x63, 0x65, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
|
|
||||||
0x65, 0x12, 0x3f, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28,
|
|
||||||
0x0e, 0x32, 0x27, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x64, 0x2e, 0x55, 0x70, 0x6c, 0x6f, 0x61,
|
|
||||||
0x64, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55,
|
|
||||||
0x6e, 0x69, 0x74, 0x73, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74,
|
|
||||||
0x75, 0x73, 0x2a, 0x64, 0x0a, 0x1a, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x41, 0x75, 0x74, 0x68,
|
|
||||||
0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
|
|
||||||
0x12, 0x22, 0x0a, 0x1e, 0x55, 0x50, 0x4c, 0x4f, 0x41, 0x44, 0x5f, 0x41, 0x55, 0x54, 0x48, 0x4f,
|
|
||||||
0x52, 0x49, 0x5a, 0x45, 0x44, 0x5f, 0x4b, 0x45, 0x59, 0x53, 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45,
|
|
||||||
0x53, 0x53, 0x10, 0x00, 0x12, 0x22, 0x0a, 0x1e, 0x55, 0x50, 0x4c, 0x4f, 0x41, 0x44, 0x5f, 0x41,
|
|
||||||
0x55, 0x54, 0x48, 0x4f, 0x52, 0x49, 0x5a, 0x45, 0x44, 0x5f, 0x4b, 0x45, 0x59, 0x53, 0x5f, 0x46,
|
|
||||||
0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x01, 0x2a, 0xad, 0x01, 0x0a, 0x18, 0x55, 0x70, 0x6c,
|
|
||||||
0x6f, 0x61, 0x64, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x53,
|
|
||||||
0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1f, 0x0a, 0x1b, 0x55, 0x50, 0x4c, 0x4f, 0x41, 0x44, 0x5f,
|
|
||||||
0x42, 0x4f, 0x4f, 0x54, 0x53, 0x54, 0x52, 0x41, 0x50, 0x50, 0x45, 0x52, 0x5f, 0x53, 0x55, 0x43,
|
|
||||||
0x43, 0x45, 0x53, 0x53, 0x10, 0x00, 0x12, 0x25, 0x0a, 0x21, 0x55, 0x50, 0x4c, 0x4f, 0x41, 0x44,
|
|
||||||
0x5f, 0x42, 0x4f, 0x4f, 0x54, 0x53, 0x54, 0x52, 0x41, 0x50, 0x50, 0x45, 0x52, 0x5f, 0x55, 0x50,
|
|
||||||
0x4c, 0x4f, 0x41, 0x44, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x01, 0x12, 0x24, 0x0a,
|
|
||||||
0x20, 0x55, 0x50, 0x4c, 0x4f, 0x41, 0x44, 0x5f, 0x42, 0x4f, 0x4f, 0x54, 0x53, 0x54, 0x52, 0x41,
|
|
||||||
0x50, 0x50, 0x45, 0x52, 0x5f, 0x53, 0x54, 0x41, 0x52, 0x54, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45,
|
|
||||||
0x44, 0x10, 0x02, 0x12, 0x23, 0x0a, 0x1f, 0x55, 0x50, 0x4c, 0x4f, 0x41, 0x44, 0x5f, 0x42, 0x4f,
|
|
||||||
0x4f, 0x54, 0x53, 0x54, 0x52, 0x41, 0x50, 0x50, 0x45, 0x52, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x5f,
|
|
||||||
0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x10, 0x03, 0x2a, 0x75, 0x0a, 0x1f, 0x55, 0x70, 0x6c, 0x6f,
|
|
||||||
0x61, 0x64, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
|
|
||||||
0x55, 0x6e, 0x69, 0x74, 0x73, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x28, 0x0a, 0x24, 0x55,
|
|
||||||
0x50, 0x4c, 0x4f, 0x41, 0x44, 0x5f, 0x53, 0x59, 0x53, 0x54, 0x45, 0x4d, 0x44, 0x5f, 0x53, 0x45,
|
|
||||||
0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x49, 0x54, 0x53, 0x5f, 0x53, 0x55, 0x43, 0x43,
|
|
||||||
0x45, 0x53, 0x53, 0x10, 0x00, 0x12, 0x28, 0x0a, 0x24, 0x55, 0x50, 0x4c, 0x4f, 0x41, 0x44, 0x5f,
|
|
||||||
0x53, 0x59, 0x53, 0x54, 0x45, 0x4d, 0x44, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f,
|
|
||||||
0x55, 0x4e, 0x49, 0x54, 0x53, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x01, 0x32,
|
|
||||||
0xe8, 0x03, 0x0a, 0x06, 0x44, 0x65, 0x62, 0x75, 0x67, 0x64, 0x12, 0x63, 0x0a, 0x14, 0x55, 0x70,
|
|
||||||
0x6c, 0x6f, 0x61, 0x64, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x4b, 0x65,
|
|
||||||
0x79, 0x73, 0x12, 0x23, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x64, 0x2e, 0x55, 0x70, 0x6c, 0x6f,
|
|
||||||
0x61, 0x64, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73,
|
|
||||||
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x64,
|
|
||||||
0x2e, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65,
|
|
||||||
0x64, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12,
|
|
||||||
0x4b, 0x0a, 0x12, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72,
|
|
||||||
0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x0d, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x64, 0x2e, 0x43,
|
|
||||||
0x68, 0x75, 0x6e, 0x6b, 0x1a, 0x22, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x64, 0x2e, 0x55, 0x70,
|
|
||||||
0x6c, 0x6f, 0x61, 0x64, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72,
|
|
||||||
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x12, 0x4e, 0x0a, 0x14,
|
|
||||||
0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61,
|
|
||||||
0x70, 0x70, 0x65, 0x72, 0x12, 0x23, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x64, 0x2e, 0x44, 0x6f,
|
|
||||||
0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x70,
|
|
||||||
0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0d, 0x2e, 0x64, 0x65, 0x62, 0x75,
|
|
||||||
0x67, 0x64, 0x2e, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x22, 0x00, 0x30, 0x01, 0x12, 0x69, 0x0a, 0x16,
|
|
||||||
0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a,
|
|
||||||
0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x25, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x64, 0x2e,
|
|
||||||
0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a,
|
|
||||||
0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e,
|
|
||||||
0x64, 0x65, 0x62, 0x75, 0x67, 0x64, 0x2e, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x41,
|
|
||||||
0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73,
|
|
||||||
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x71, 0x0a, 0x18, 0x55, 0x70, 0x6c, 0x6f, 0x61,
|
|
||||||
0x64, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x6e,
|
|
||||||
0x69, 0x74, 0x73, 0x12, 0x28, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x64, 0x2e, 0x55, 0x70, 0x6c,
|
|
||||||
0x6f, 0x61, 0x64, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
|
0x6f, 0x61, 0x64, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
|
||||||
0x65, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e,
|
0x65, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74,
|
||||||
0x64, 0x65, 0x62, 0x75, 0x67, 0x64, 0x2e, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x79, 0x73,
|
0x61, 0x74, 0x75, 0x73, 0x2a, 0xad, 0x01, 0x0a, 0x18, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x42,
|
||||||
0x74, 0x65, 0x6d, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x6e, 0x69, 0x74, 0x73,
|
0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75,
|
||||||
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x38, 0x5a, 0x36, 0x67, 0x69,
|
0x73, 0x12, 0x1f, 0x0a, 0x1b, 0x55, 0x50, 0x4c, 0x4f, 0x41, 0x44, 0x5f, 0x42, 0x4f, 0x4f, 0x54,
|
||||||
0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x64, 0x67, 0x65, 0x6c, 0x65, 0x73,
|
0x53, 0x54, 0x52, 0x41, 0x50, 0x50, 0x45, 0x52, 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53,
|
||||||
0x73, 0x73, 0x79, 0x73, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x65, 0x6c, 0x6c, 0x61, 0x74, 0x69,
|
0x10, 0x00, 0x12, 0x25, 0x0a, 0x21, 0x55, 0x50, 0x4c, 0x4f, 0x41, 0x44, 0x5f, 0x42, 0x4f, 0x4f,
|
||||||
0x6f, 0x6e, 0x2f, 0x76, 0x32, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x64, 0x2f, 0x73, 0x65, 0x72,
|
0x54, 0x53, 0x54, 0x52, 0x41, 0x50, 0x50, 0x45, 0x52, 0x5f, 0x55, 0x50, 0x4c, 0x4f, 0x41, 0x44,
|
||||||
0x76, 0x69, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x01, 0x12, 0x24, 0x0a, 0x20, 0x55, 0x50, 0x4c,
|
||||||
|
0x4f, 0x41, 0x44, 0x5f, 0x42, 0x4f, 0x4f, 0x54, 0x53, 0x54, 0x52, 0x41, 0x50, 0x50, 0x45, 0x52,
|
||||||
|
0x5f, 0x53, 0x54, 0x41, 0x52, 0x54, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x02, 0x12,
|
||||||
|
0x23, 0x0a, 0x1f, 0x55, 0x50, 0x4c, 0x4f, 0x41, 0x44, 0x5f, 0x42, 0x4f, 0x4f, 0x54, 0x53, 0x54,
|
||||||
|
0x52, 0x41, 0x50, 0x50, 0x45, 0x52, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x5f, 0x45, 0x58, 0x49, 0x53,
|
||||||
|
0x54, 0x53, 0x10, 0x03, 0x2a, 0x75, 0x0a, 0x1f, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x79,
|
||||||
|
0x73, 0x74, 0x65, 0x6d, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x6e, 0x69, 0x74,
|
||||||
|
0x73, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x28, 0x0a, 0x24, 0x55, 0x50, 0x4c, 0x4f, 0x41,
|
||||||
|
0x44, 0x5f, 0x53, 0x59, 0x53, 0x54, 0x45, 0x4d, 0x44, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43,
|
||||||
|
0x45, 0x5f, 0x55, 0x4e, 0x49, 0x54, 0x53, 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10,
|
||||||
|
0x00, 0x12, 0x28, 0x0a, 0x24, 0x55, 0x50, 0x4c, 0x4f, 0x41, 0x44, 0x5f, 0x53, 0x59, 0x53, 0x54,
|
||||||
|
0x45, 0x4d, 0x44, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x49, 0x54,
|
||||||
|
0x53, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x01, 0x32, 0x98, 0x02, 0x0a, 0x06,
|
||||||
|
0x44, 0x65, 0x62, 0x75, 0x67, 0x64, 0x12, 0x4b, 0x0a, 0x12, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64,
|
||||||
|
0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x0d, 0x2e, 0x64,
|
||||||
|
0x65, 0x62, 0x75, 0x67, 0x64, 0x2e, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x1a, 0x22, 0x2e, 0x64, 0x65,
|
||||||
|
0x62, 0x75, 0x67, 0x64, 0x2e, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x6f, 0x6f, 0x74, 0x73,
|
||||||
|
0x74, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
|
||||||
|
0x00, 0x28, 0x01, 0x12, 0x4e, 0x0a, 0x14, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x42,
|
||||||
|
0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x23, 0x2e, 0x64, 0x65,
|
||||||
|
0x62, 0x75, 0x67, 0x64, 0x2e, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x6f, 0x6f,
|
||||||
|
0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
|
||||||
|
0x1a, 0x0d, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x64, 0x2e, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x22,
|
||||||
|
0x00, 0x30, 0x01, 0x12, 0x71, 0x0a, 0x18, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x79, 0x73,
|
||||||
|
0x74, 0x65, 0x6d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x12,
|
||||||
|
0x28, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x64, 0x2e, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x53,
|
||||||
|
0x79, 0x73, 0x74, 0x65, 0x6d, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x6e, 0x69,
|
||||||
|
0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x64, 0x65, 0x62, 0x75,
|
||||||
|
0x67, 0x64, 0x2e, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x64,
|
||||||
|
0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70,
|
||||||
|
0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x38, 0x5a, 0x36, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
|
||||||
|
0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x64, 0x67, 0x65, 0x6c, 0x65, 0x73, 0x73, 0x73, 0x79, 0x73,
|
||||||
|
0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x65, 0x6c, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x76,
|
||||||
|
0x32, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x64, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
|
||||||
|
0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||||
}

var (
@ -804,46 +483,33 @@ func file_debugd_proto_rawDescGZIP() []byte {
    return file_debugd_proto_rawDescData
}

var file_debugd_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
var file_debugd_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
var file_debugd_proto_msgTypes = make([]protoimpl.MessageInfo, 11)
var file_debugd_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
var file_debugd_proto_goTypes = []interface{}{
    (UploadAuthorizedKeysStatus)(0),      // 0: debugd.UploadAuthorizedKeysStatus
    (UploadBootstrapperStatus)(0),        // 0: debugd.UploadBootstrapperStatus
    (UploadBootstrapperStatus)(0),        // 1: debugd.UploadBootstrapperStatus
    (UploadSystemdServiceUnitsStatus)(0), // 1: debugd.UploadSystemdServiceUnitsStatus
    (UploadSystemdServiceUnitsStatus)(0), // 2: debugd.UploadSystemdServiceUnitsStatus
    (*DownloadBootstrapperRequest)(nil),  // 2: debugd.DownloadBootstrapperRequest
    (*DownloadBootstrapperRequest)(nil),  // 3: debugd.DownloadBootstrapperRequest
    (*Chunk)(nil),                        // 3: debugd.Chunk
    (*DownloadAuthorizedKeysRequest)(nil),  // 4: debugd.DownloadAuthorizedKeysRequest
    (*UploadBootstrapperResponse)(nil),   // 4: debugd.UploadBootstrapperResponse
    (*DownloadAuthorizedKeysResponse)(nil), // 5: debugd.DownloadAuthorizedKeysResponse
    (*ServiceUnit)(nil),                  // 5: debugd.ServiceUnit
    (*AuthorizedKey)(nil),                // 6: debugd.AuthorizedKey
    (*UploadSystemdServiceUnitsRequest)(nil), // 6: debugd.UploadSystemdServiceUnitsRequest
    (*UploadAuthorizedKeysRequest)(nil),  // 7: debugd.UploadAuthorizedKeysRequest
    (*UploadSystemdServiceUnitsResponse)(nil), // 7: debugd.UploadSystemdServiceUnitsResponse
    (*UploadAuthorizedKeysResponse)(nil), // 8: debugd.UploadAuthorizedKeysResponse
    (*Chunk)(nil),                        // 9: debugd.Chunk
    (*UploadBootstrapperResponse)(nil),   // 10: debugd.UploadBootstrapperResponse
    (*ServiceUnit)(nil),                  // 11: debugd.ServiceUnit
    (*UploadSystemdServiceUnitsRequest)(nil),  // 12: debugd.UploadSystemdServiceUnitsRequest
    (*UploadSystemdServiceUnitsResponse)(nil), // 13: debugd.UploadSystemdServiceUnitsResponse
}
var file_debugd_proto_depIdxs = []int32{
    6,  // 0: debugd.DownloadAuthorizedKeysResponse.keys:type_name -> debugd.AuthorizedKey
    0,  // 0: debugd.UploadBootstrapperResponse.status:type_name -> debugd.UploadBootstrapperStatus
    6,  // 1: debugd.UploadAuthorizedKeysRequest.keys:type_name -> debugd.AuthorizedKey
    5,  // 1: debugd.UploadSystemdServiceUnitsRequest.units:type_name -> debugd.ServiceUnit
    0,  // 2: debugd.UploadAuthorizedKeysResponse.status:type_name -> debugd.UploadAuthorizedKeysStatus
    1,  // 2: debugd.UploadSystemdServiceUnitsResponse.status:type_name -> debugd.UploadSystemdServiceUnitsStatus
    1,  // 3: debugd.UploadBootstrapperResponse.status:type_name -> debugd.UploadBootstrapperStatus
    3,  // 3: debugd.Debugd.UploadBootstrapper:input_type -> debugd.Chunk
    11, // 4: debugd.UploadSystemdServiceUnitsRequest.units:type_name -> debugd.ServiceUnit
    2,  // 4: debugd.Debugd.DownloadBootstrapper:input_type -> debugd.DownloadBootstrapperRequest
    2,  // 5: debugd.UploadSystemdServiceUnitsResponse.status:type_name -> debugd.UploadSystemdServiceUnitsStatus
    6,  // 5: debugd.Debugd.UploadSystemServiceUnits:input_type -> debugd.UploadSystemdServiceUnitsRequest
    7,  // 6: debugd.Debugd.UploadAuthorizedKeys:input_type -> debugd.UploadAuthorizedKeysRequest
    4,  // 6: debugd.Debugd.UploadBootstrapper:output_type -> debugd.UploadBootstrapperResponse
    9,  // 7: debugd.Debugd.UploadBootstrapper:input_type -> debugd.Chunk
    3,  // 7: debugd.Debugd.DownloadBootstrapper:output_type -> debugd.Chunk
    3,  // 8: debugd.Debugd.DownloadBootstrapper:input_type -> debugd.DownloadBootstrapperRequest
    7,  // 8: debugd.Debugd.UploadSystemServiceUnits:output_type -> debugd.UploadSystemdServiceUnitsResponse
    4,  // 9: debugd.Debugd.DownloadAuthorizedKeys:input_type -> debugd.DownloadAuthorizedKeysRequest
    6,  // [6:9] is the sub-list for method output_type
    12, // 10: debugd.Debugd.UploadSystemServiceUnits:input_type -> debugd.UploadSystemdServiceUnitsRequest
    3,  // [3:6] is the sub-list for method input_type
    8,  // 11: debugd.Debugd.UploadAuthorizedKeys:output_type -> debugd.UploadAuthorizedKeysResponse
|
||||||
10, // 12: debugd.Debugd.UploadBootstrapper:output_type -> debugd.UploadBootstrapperResponse
|
3, // [3:3] is the sub-list for extension extendee
|
||||||
9, // 13: debugd.Debugd.DownloadBootstrapper:output_type -> debugd.Chunk
|
0, // [0:3] is the sub-list for field type_name
|
||||||
5, // 14: debugd.Debugd.DownloadAuthorizedKeys:output_type -> debugd.DownloadAuthorizedKeysResponse
|
|
||||||
13, // 15: debugd.Debugd.UploadSystemServiceUnits:output_type -> debugd.UploadSystemdServiceUnitsResponse
|
|
||||||
11, // [11:16] is the sub-list for method output_type
|
|
||||||
6, // [6:11] is the sub-list for method input_type
|
|
||||||
6, // [6:6] is the sub-list for extension type_name
|
|
||||||
6, // [6:6] is the sub-list for extension extendee
|
|
||||||
0, // [0:6] is the sub-list for field type_name
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() { file_debugd_proto_init() }
|
func init() { file_debugd_proto_init() }
|
||||||
|
@ -865,66 +531,6 @@ func file_debugd_proto_init() {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
file_debugd_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
file_debugd_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||||
switch v := v.(*DownloadAuthorizedKeysRequest); i {
|
|
||||||
case 0:
|
|
||||||
return &v.state
|
|
||||||
case 1:
|
|
||||||
return &v.sizeCache
|
|
||||||
case 2:
|
|
||||||
return &v.unknownFields
|
|
||||||
default:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
file_debugd_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
|
||||||
switch v := v.(*DownloadAuthorizedKeysResponse); i {
|
|
||||||
case 0:
|
|
||||||
return &v.state
|
|
||||||
case 1:
|
|
||||||
return &v.sizeCache
|
|
||||||
case 2:
|
|
||||||
return &v.unknownFields
|
|
||||||
default:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
file_debugd_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
|
|
||||||
switch v := v.(*AuthorizedKey); i {
|
|
||||||
case 0:
|
|
||||||
return &v.state
|
|
||||||
case 1:
|
|
||||||
return &v.sizeCache
|
|
||||||
case 2:
|
|
||||||
return &v.unknownFields
|
|
||||||
default:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
file_debugd_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
|
|
||||||
switch v := v.(*UploadAuthorizedKeysRequest); i {
|
|
||||||
case 0:
|
|
||||||
return &v.state
|
|
||||||
case 1:
|
|
||||||
return &v.sizeCache
|
|
||||||
case 2:
|
|
||||||
return &v.unknownFields
|
|
||||||
default:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
file_debugd_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
|
|
||||||
switch v := v.(*UploadAuthorizedKeysResponse); i {
|
|
||||||
case 0:
|
|
||||||
return &v.state
|
|
||||||
case 1:
|
|
||||||
return &v.sizeCache
|
|
||||||
case 2:
|
|
||||||
return &v.unknownFields
|
|
||||||
default:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
file_debugd_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
|
|
||||||
switch v := v.(*Chunk); i {
|
switch v := v.(*Chunk); i {
|
||||||
case 0:
|
case 0:
|
||||||
return &v.state
|
return &v.state
|
||||||
|
@ -936,7 +542,7 @@ func file_debugd_proto_init() {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
file_debugd_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
|
file_debugd_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||||
switch v := v.(*UploadBootstrapperResponse); i {
|
switch v := v.(*UploadBootstrapperResponse); i {
|
||||||
case 0:
|
case 0:
|
||||||
return &v.state
|
return &v.state
|
||||||
|
@ -948,7 +554,7 @@ func file_debugd_proto_init() {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
file_debugd_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
|
file_debugd_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
|
||||||
switch v := v.(*ServiceUnit); i {
|
switch v := v.(*ServiceUnit); i {
|
||||||
case 0:
|
case 0:
|
||||||
return &v.state
|
return &v.state
|
||||||
|
@ -960,7 +566,7 @@ func file_debugd_proto_init() {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
file_debugd_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
|
file_debugd_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
|
||||||
switch v := v.(*UploadSystemdServiceUnitsRequest); i {
|
switch v := v.(*UploadSystemdServiceUnitsRequest); i {
|
||||||
case 0:
|
case 0:
|
||||||
return &v.state
|
return &v.state
|
||||||
|
@ -972,7 +578,7 @@ func file_debugd_proto_init() {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
file_debugd_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
|
file_debugd_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
|
||||||
switch v := v.(*UploadSystemdServiceUnitsResponse); i {
|
switch v := v.(*UploadSystemdServiceUnitsResponse); i {
|
||||||
case 0:
|
case 0:
|
||||||
return &v.state
|
return &v.state
|
||||||
|
@ -990,8 +596,8 @@ func file_debugd_proto_init() {
|
||||||
File: protoimpl.DescBuilder{
|
File: protoimpl.DescBuilder{
|
||||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||||
RawDescriptor: file_debugd_proto_rawDesc,
|
RawDescriptor: file_debugd_proto_rawDesc,
|
||||||
NumEnums: 3,
|
NumEnums: 2,
|
||||||
NumMessages: 11,
|
NumMessages: 6,
|
||||||
NumExtensions: 0,
|
NumExtensions: 0,
|
||||||
NumServices: 1,
|
NumServices: 1,
|
||||||
},
|
},
|
||||||
|
|
|
@ -5,39 +5,13 @@ option go_package = "github.com/edgelesssys/constellation/v2/debugd/service";
|
||||||
package debugd;
|
package debugd;
|
||||||
|
|
||||||
service Debugd {
|
service Debugd {
|
||||||
rpc UploadAuthorizedKeys(UploadAuthorizedKeysRequest) returns (UploadAuthorizedKeysResponse) {}
|
|
||||||
rpc UploadBootstrapper(stream Chunk) returns (UploadBootstrapperResponse) {}
|
rpc UploadBootstrapper(stream Chunk) returns (UploadBootstrapperResponse) {}
|
||||||
rpc DownloadBootstrapper(DownloadBootstrapperRequest) returns (stream Chunk) {}
|
rpc DownloadBootstrapper(DownloadBootstrapperRequest) returns (stream Chunk) {}
|
||||||
rpc DownloadAuthorizedKeys(DownloadAuthorizedKeysRequest) returns (DownloadAuthorizedKeysResponse) {}
|
|
||||||
rpc UploadSystemServiceUnits(UploadSystemdServiceUnitsRequest) returns (UploadSystemdServiceUnitsResponse) {}
|
rpc UploadSystemServiceUnits(UploadSystemdServiceUnitsRequest) returns (UploadSystemdServiceUnitsResponse) {}
|
||||||
}
|
}
|
||||||
|
|
||||||
message DownloadBootstrapperRequest {}
|
message DownloadBootstrapperRequest {}
|
||||||
|
|
||||||
message DownloadAuthorizedKeysRequest {}
|
|
||||||
|
|
||||||
message DownloadAuthorizedKeysResponse {
|
|
||||||
repeated AuthorizedKey keys = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message AuthorizedKey {
|
|
||||||
string username = 1;
|
|
||||||
string key_value = 2;
|
|
||||||
}
|
|
||||||
|
|
||||||
message UploadAuthorizedKeysRequest {
|
|
||||||
repeated AuthorizedKey keys = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message UploadAuthorizedKeysResponse {
|
|
||||||
UploadAuthorizedKeysStatus status = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
enum UploadAuthorizedKeysStatus {
|
|
||||||
UPLOAD_AUTHORIZED_KEYS_SUCCESS = 0;
|
|
||||||
UPLOAD_AUTHORIZED_KEYS_FAILURE = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message Chunk {
|
message Chunk {
|
||||||
bytes content = 1;
|
bytes content = 1;
|
||||||
}
|
}
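
For illustration only, the trimmed service above can still be exercised ad hoc with a generic gRPC client. The sketch below assumes `grpcurl` is installed, that the updated `debugd.proto` is in the working directory, and that a debug-image node exposes debugd on port 4000 without TLS; none of these assumptions are established by this change.

```sh
# Hypothetical smoke test of the remaining Debugd RPCs using grpcurl.
# <node-ip> is a placeholder for a reachable debug node.
grpcurl -plaintext -proto debugd.proto -d '{}' \
  <node-ip>:4000 debugd.Debugd/DownloadBootstrapper
```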
|
||||||
|
|
|
@ -22,10 +22,8 @@ const _ = grpc.SupportPackageIsVersion7
|
||||||
//
|
//
|
||||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
|
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
|
||||||
type DebugdClient interface {
|
type DebugdClient interface {
|
||||||
UploadAuthorizedKeys(ctx context.Context, in *UploadAuthorizedKeysRequest, opts ...grpc.CallOption) (*UploadAuthorizedKeysResponse, error)
|
|
||||||
UploadBootstrapper(ctx context.Context, opts ...grpc.CallOption) (Debugd_UploadBootstrapperClient, error)
|
UploadBootstrapper(ctx context.Context, opts ...grpc.CallOption) (Debugd_UploadBootstrapperClient, error)
|
||||||
DownloadBootstrapper(ctx context.Context, in *DownloadBootstrapperRequest, opts ...grpc.CallOption) (Debugd_DownloadBootstrapperClient, error)
|
DownloadBootstrapper(ctx context.Context, in *DownloadBootstrapperRequest, opts ...grpc.CallOption) (Debugd_DownloadBootstrapperClient, error)
|
||||||
DownloadAuthorizedKeys(ctx context.Context, in *DownloadAuthorizedKeysRequest, opts ...grpc.CallOption) (*DownloadAuthorizedKeysResponse, error)
|
|
||||||
UploadSystemServiceUnits(ctx context.Context, in *UploadSystemdServiceUnitsRequest, opts ...grpc.CallOption) (*UploadSystemdServiceUnitsResponse, error)
|
UploadSystemServiceUnits(ctx context.Context, in *UploadSystemdServiceUnitsRequest, opts ...grpc.CallOption) (*UploadSystemdServiceUnitsResponse, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -37,15 +35,6 @@ func NewDebugdClient(cc grpc.ClientConnInterface) DebugdClient {
|
||||||
return &debugdClient{cc}
|
return &debugdClient{cc}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *debugdClient) UploadAuthorizedKeys(ctx context.Context, in *UploadAuthorizedKeysRequest, opts ...grpc.CallOption) (*UploadAuthorizedKeysResponse, error) {
|
|
||||||
out := new(UploadAuthorizedKeysResponse)
|
|
||||||
err := c.cc.Invoke(ctx, "/debugd.Debugd/UploadAuthorizedKeys", in, out, opts...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return out, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *debugdClient) UploadBootstrapper(ctx context.Context, opts ...grpc.CallOption) (Debugd_UploadBootstrapperClient, error) {
|
func (c *debugdClient) UploadBootstrapper(ctx context.Context, opts ...grpc.CallOption) (Debugd_UploadBootstrapperClient, error) {
|
||||||
stream, err := c.cc.NewStream(ctx, &Debugd_ServiceDesc.Streams[0], "/debugd.Debugd/UploadBootstrapper", opts...)
|
stream, err := c.cc.NewStream(ctx, &Debugd_ServiceDesc.Streams[0], "/debugd.Debugd/UploadBootstrapper", opts...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -112,15 +101,6 @@ func (x *debugdDownloadBootstrapperClient) Recv() (*Chunk, error) {
|
||||||
return m, nil
|
return m, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *debugdClient) DownloadAuthorizedKeys(ctx context.Context, in *DownloadAuthorizedKeysRequest, opts ...grpc.CallOption) (*DownloadAuthorizedKeysResponse, error) {
|
|
||||||
out := new(DownloadAuthorizedKeysResponse)
|
|
||||||
err := c.cc.Invoke(ctx, "/debugd.Debugd/DownloadAuthorizedKeys", in, out, opts...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return out, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *debugdClient) UploadSystemServiceUnits(ctx context.Context, in *UploadSystemdServiceUnitsRequest, opts ...grpc.CallOption) (*UploadSystemdServiceUnitsResponse, error) {
|
func (c *debugdClient) UploadSystemServiceUnits(ctx context.Context, in *UploadSystemdServiceUnitsRequest, opts ...grpc.CallOption) (*UploadSystemdServiceUnitsResponse, error) {
|
||||||
out := new(UploadSystemdServiceUnitsResponse)
|
out := new(UploadSystemdServiceUnitsResponse)
|
||||||
err := c.cc.Invoke(ctx, "/debugd.Debugd/UploadSystemServiceUnits", in, out, opts...)
|
err := c.cc.Invoke(ctx, "/debugd.Debugd/UploadSystemServiceUnits", in, out, opts...)
|
||||||
|
@ -134,10 +114,8 @@ func (c *debugdClient) UploadSystemServiceUnits(ctx context.Context, in *UploadS
|
||||||
// All implementations must embed UnimplementedDebugdServer
|
// All implementations must embed UnimplementedDebugdServer
|
||||||
// for forward compatibility
|
// for forward compatibility
|
||||||
type DebugdServer interface {
|
type DebugdServer interface {
|
||||||
UploadAuthorizedKeys(context.Context, *UploadAuthorizedKeysRequest) (*UploadAuthorizedKeysResponse, error)
|
|
||||||
UploadBootstrapper(Debugd_UploadBootstrapperServer) error
|
UploadBootstrapper(Debugd_UploadBootstrapperServer) error
|
||||||
DownloadBootstrapper(*DownloadBootstrapperRequest, Debugd_DownloadBootstrapperServer) error
|
DownloadBootstrapper(*DownloadBootstrapperRequest, Debugd_DownloadBootstrapperServer) error
|
||||||
DownloadAuthorizedKeys(context.Context, *DownloadAuthorizedKeysRequest) (*DownloadAuthorizedKeysResponse, error)
|
|
||||||
UploadSystemServiceUnits(context.Context, *UploadSystemdServiceUnitsRequest) (*UploadSystemdServiceUnitsResponse, error)
|
UploadSystemServiceUnits(context.Context, *UploadSystemdServiceUnitsRequest) (*UploadSystemdServiceUnitsResponse, error)
|
||||||
mustEmbedUnimplementedDebugdServer()
|
mustEmbedUnimplementedDebugdServer()
|
||||||
}
|
}
|
||||||
|
@ -146,18 +124,12 @@ type DebugdServer interface {
|
||||||
type UnimplementedDebugdServer struct {
|
type UnimplementedDebugdServer struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (UnimplementedDebugdServer) UploadAuthorizedKeys(context.Context, *UploadAuthorizedKeysRequest) (*UploadAuthorizedKeysResponse, error) {
|
|
||||||
return nil, status.Errorf(codes.Unimplemented, "method UploadAuthorizedKeys not implemented")
|
|
||||||
}
|
|
||||||
func (UnimplementedDebugdServer) UploadBootstrapper(Debugd_UploadBootstrapperServer) error {
|
func (UnimplementedDebugdServer) UploadBootstrapper(Debugd_UploadBootstrapperServer) error {
|
||||||
return status.Errorf(codes.Unimplemented, "method UploadBootstrapper not implemented")
|
return status.Errorf(codes.Unimplemented, "method UploadBootstrapper not implemented")
|
||||||
}
|
}
|
||||||
func (UnimplementedDebugdServer) DownloadBootstrapper(*DownloadBootstrapperRequest, Debugd_DownloadBootstrapperServer) error {
|
func (UnimplementedDebugdServer) DownloadBootstrapper(*DownloadBootstrapperRequest, Debugd_DownloadBootstrapperServer) error {
|
||||||
return status.Errorf(codes.Unimplemented, "method DownloadBootstrapper not implemented")
|
return status.Errorf(codes.Unimplemented, "method DownloadBootstrapper not implemented")
|
||||||
}
|
}
|
||||||
func (UnimplementedDebugdServer) DownloadAuthorizedKeys(context.Context, *DownloadAuthorizedKeysRequest) (*DownloadAuthorizedKeysResponse, error) {
|
|
||||||
return nil, status.Errorf(codes.Unimplemented, "method DownloadAuthorizedKeys not implemented")
|
|
||||||
}
|
|
||||||
func (UnimplementedDebugdServer) UploadSystemServiceUnits(context.Context, *UploadSystemdServiceUnitsRequest) (*UploadSystemdServiceUnitsResponse, error) {
|
func (UnimplementedDebugdServer) UploadSystemServiceUnits(context.Context, *UploadSystemdServiceUnitsRequest) (*UploadSystemdServiceUnitsResponse, error) {
|
||||||
return nil, status.Errorf(codes.Unimplemented, "method UploadSystemServiceUnits not implemented")
|
return nil, status.Errorf(codes.Unimplemented, "method UploadSystemServiceUnits not implemented")
|
||||||
}
|
}
|
||||||
|
@ -174,24 +146,6 @@ func RegisterDebugdServer(s grpc.ServiceRegistrar, srv DebugdServer) {
|
||||||
s.RegisterService(&Debugd_ServiceDesc, srv)
|
s.RegisterService(&Debugd_ServiceDesc, srv)
|
||||||
}
|
}
|
||||||
|
|
||||||
func _Debugd_UploadAuthorizedKeys_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
|
||||||
in := new(UploadAuthorizedKeysRequest)
|
|
||||||
if err := dec(in); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if interceptor == nil {
|
|
||||||
return srv.(DebugdServer).UploadAuthorizedKeys(ctx, in)
|
|
||||||
}
|
|
||||||
info := &grpc.UnaryServerInfo{
|
|
||||||
Server: srv,
|
|
||||||
FullMethod: "/debugd.Debugd/UploadAuthorizedKeys",
|
|
||||||
}
|
|
||||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
|
||||||
return srv.(DebugdServer).UploadAuthorizedKeys(ctx, req.(*UploadAuthorizedKeysRequest))
|
|
||||||
}
|
|
||||||
return interceptor(ctx, in, info, handler)
|
|
||||||
}
|
|
||||||
|
|
||||||
func _Debugd_UploadBootstrapper_Handler(srv interface{}, stream grpc.ServerStream) error {
|
func _Debugd_UploadBootstrapper_Handler(srv interface{}, stream grpc.ServerStream) error {
|
||||||
return srv.(DebugdServer).UploadBootstrapper(&debugdUploadBootstrapperServer{stream})
|
return srv.(DebugdServer).UploadBootstrapper(&debugdUploadBootstrapperServer{stream})
|
||||||
}
|
}
|
||||||
|
@ -239,24 +193,6 @@ func (x *debugdDownloadBootstrapperServer) Send(m *Chunk) error {
|
||||||
return x.ServerStream.SendMsg(m)
|
return x.ServerStream.SendMsg(m)
|
||||||
}
|
}
|
||||||
|
|
||||||
func _Debugd_DownloadAuthorizedKeys_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
|
||||||
in := new(DownloadAuthorizedKeysRequest)
|
|
||||||
if err := dec(in); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if interceptor == nil {
|
|
||||||
return srv.(DebugdServer).DownloadAuthorizedKeys(ctx, in)
|
|
||||||
}
|
|
||||||
info := &grpc.UnaryServerInfo{
|
|
||||||
Server: srv,
|
|
||||||
FullMethod: "/debugd.Debugd/DownloadAuthorizedKeys",
|
|
||||||
}
|
|
||||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
|
||||||
return srv.(DebugdServer).DownloadAuthorizedKeys(ctx, req.(*DownloadAuthorizedKeysRequest))
|
|
||||||
}
|
|
||||||
return interceptor(ctx, in, info, handler)
|
|
||||||
}
|
|
||||||
|
|
||||||
func _Debugd_UploadSystemServiceUnits_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
func _Debugd_UploadSystemServiceUnits_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||||
in := new(UploadSystemdServiceUnitsRequest)
|
in := new(UploadSystemdServiceUnitsRequest)
|
||||||
if err := dec(in); err != nil {
|
if err := dec(in); err != nil {
|
||||||
|
@ -282,14 +218,6 @@ var Debugd_ServiceDesc = grpc.ServiceDesc{
|
||||||
ServiceName: "debugd.Debugd",
|
ServiceName: "debugd.Debugd",
|
||||||
HandlerType: (*DebugdServer)(nil),
|
HandlerType: (*DebugdServer)(nil),
|
||||||
Methods: []grpc.MethodDesc{
|
Methods: []grpc.MethodDesc{
|
||||||
{
|
|
||||||
MethodName: "UploadAuthorizedKeys",
|
|
||||||
Handler: _Debugd_UploadAuthorizedKeys_Handler,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
MethodName: "DownloadAuthorizedKeys",
|
|
||||||
Handler: _Debugd_DownloadAuthorizedKeys_Handler,
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
MethodName: "UploadSystemServiceUnits",
|
MethodName: "UploadSystemServiceUnits",
|
||||||
Handler: _Debugd_UploadSystemServiceUnits_Handler,
|
Handler: _Debugd_UploadSystemServiceUnits_Handler,
|
||||||
|
|
|
@ -8,7 +8,6 @@ These features are provided by several components:
|
||||||
* The [JoinService](components.md#joinservice) joins new nodes to an existing cluster
|
* The [JoinService](components.md#joinservice) joins new nodes to an existing cluster
|
||||||
* The [VerificationService](components.md#verificationservice) provides remote attestation functionality
|
* The [VerificationService](components.md#verificationservice) provides remote attestation functionality
|
||||||
* The [Key Management Service (KMS)](components.md#kms) manages Constellation-internal keys
|
* The [Key Management Service (KMS)](components.md#kms) manages Constellation-internal keys
|
||||||
* The [AccessManager](components.md#accessmanager) manages node SSH access
|
|
||||||
|
|
||||||
The relations between components are shown in the following diagram:
|
The relations between components are shown in the following diagram:
|
||||||
|
|
||||||
|
@ -22,7 +21,6 @@ flowchart LR
|
||||||
C[Bootstrapper]
|
C[Bootstrapper]
|
||||||
end
|
end
|
||||||
subgraph Kubernetes
|
subgraph Kubernetes
|
||||||
D[AccessManager]
|
|
||||||
E[JoinService]
|
E[JoinService]
|
||||||
F[KMS]
|
F[KMS]
|
||||||
G[VerificationService]
|
G[VerificationService]
|
||||||
|
@ -74,8 +72,3 @@ Read more about the hardware-based [attestation feature](attestation.md) of Cons
|
||||||
The *KMS* runs as DaemonSet on each control-plane node.
|
The *KMS* runs as DaemonSet on each control-plane node.
|
||||||
It implements the key management for the [storage encryption keys](keys.md#storage-encryption) in Constellation. These keys are used for the [state disk](images.md#state-disk) of each node and the [transparently encrypted storage](encrypted-storage.md) for Kubernetes.
|
It implements the key management for the [storage encryption keys](keys.md#storage-encryption) in Constellation. These keys are used for the [state disk](images.md#state-disk) of each node and the [transparently encrypted storage](encrypted-storage.md) for Kubernetes.
|
||||||
Depending on whether the [constellation-managed](keys.md#constellation-managed-key-management) or [user-managed](keys.md#user-managed-key-management) mode is used, the *KMS* holds the key encryption key (KEK) directly or calls an external service for key derivation, respectively.
|
Depending on whether the [constellation-managed](keys.md#constellation-managed-key-management) or [user-managed](keys.md#user-managed-key-management) mode is used, the *KMS* holds the key encryption key (KEK) directly or calls an external service for key derivation, respectively.
|
||||||
|
|
||||||
## AccessManager
|
|
||||||
|
|
||||||
The *AccessManager* runs as DaemonSet on each node.
|
|
||||||
It manages the user's SSH access to nodes as specified in the config.
|
|
||||||
|
|
|
@ -1,59 +0,0 @@
|
||||||
# Manage SSH keys
|
|
||||||
|
|
||||||
Constellation allows you to create UNIX users that can connect to both control-plane and worker nodes over SSH. As the system partitions are read-only, users need to be re-created upon each restart of a node. This is automated by the *Access Manager*.
|
|
||||||
|
|
||||||
On cluster initialization, users defined in the `ssh-users` section of the Constellation configuration file are created and stored in the `ssh-users` ConfigMap in the `kube-system` namespace. For a running cluster, you can add or remove users by modifying the ConfigMap and restarting a node.
|
|
||||||
|
|
||||||
## Access Manager
|
|
||||||
The Access Manager supports all OpenSSH key types. These are RSA, ECDSA (using the `nistp256`, `nistp384`, `nistp521` curves) and Ed25519.
|
|
||||||
|
|
||||||
:::note
|
|
||||||
All users are automatically created with `sudo` capabilities.
|
|
||||||
:::
|
|
||||||
|
|
||||||
The Access Manager is deployed as a DaemonSet called `constellation-access-manager`, running as an `initContainer` and afterward running a `pause` container to avoid automatic restarts. While technically killing the Pod and letting it restart works for the (re-)creation of users, it doesn't automatically remove users. Thus, a node restart is required after making changes to the ConfigMap.
|
|
||||||
|
|
||||||
When a user is deleted from the ConfigMap, it won't be re-created after the next restart of a node. The home directories of the affected users will be moved to `/var/evicted`.
|
|
||||||
|
|
||||||
You can update the ConfigMap by:
|
|
||||||
```bash
|
|
||||||
kubectl edit configmap -n kube-system ssh-users
|
|
||||||
```
|
|
||||||
|
|
||||||
Or alternatively, by modifying and re-applying it with the definition listed in the examples.
|
|
||||||
|
|
||||||
## Examples
|
|
||||||
You can add a user `myuser` in `constellation-config.yaml` like this:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
# Create SSH users on Constellation nodes upon the first initialization of the cluster.
|
|
||||||
sshUsers:
|
|
||||||
myuser: "ssh-rsa AAAA...mgNJd9jc="
|
|
||||||
```
|
|
||||||
|
|
||||||
This user is then created upon the first initialization of the cluster, and translated into a ConfigMap as shown below:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
apiVersion: v1
|
|
||||||
kind: ConfigMap
|
|
||||||
metadata:
|
|
||||||
name: ssh-users
|
|
||||||
namespace: kube-system
|
|
||||||
data:
|
|
||||||
myuser: "ssh-rsa AAAA...mgNJd9jc="
|
|
||||||
```
|
|
||||||
|
|
||||||
You can add users by adding `data` entries:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
apiVersion: v1
|
|
||||||
kind: ConfigMap
|
|
||||||
metadata:
|
|
||||||
name: ssh-users
|
|
||||||
namespace: kube-system
|
|
||||||
data:
|
|
||||||
myuser: "ssh-rsa AAAA...mgNJd9jc="
|
|
||||||
anotheruser: "ssh-ed25519 AAAA...CldH"
|
|
||||||
```
|
|
||||||
|
|
||||||
Similarly, removing any entries causes users to be evicted upon the next restart of the node.
|
|
|
@ -45,3 +45,31 @@ Constellation uses the default bucket to store logs. Its [default retention peri
|
||||||
|
|
||||||
</tabItem>
|
</tabItem>
|
||||||
</tabs>
|
</tabs>
|
||||||
|
|
||||||
|
## Connect to nodes via SSH
|
||||||
|
|
||||||
|
Debugging via a shell on a node is [directly supported by Kubernetes](https://kubernetes.io/docs/tasks/debug/debug-application/debug-running-pod/#node-shell-session).
|
||||||
|
|
||||||
|
1. Figure out which node to connect to:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
kubectl get nodes
|
||||||
|
# or to see more information, such as IPs:
|
||||||
|
kubectl get nodes -o wide
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Connect to the node:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
kubectl debug node/constell-worker-xksa0-000000 -it --image=busybox
|
||||||
|
```
|
||||||
|
|
||||||
|
You will be presented with a prompt.
|
||||||
|
|
||||||
|
The node's file system is mounted at `/host` (a short sketch of working with it follows after these steps).
|
||||||
|
|
||||||
|
3. Once you're finished, clean up the debug pod:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
kubectl delete pod node-debugger-constell-worker-xksa0-000000-bjthj
|
||||||
|
```
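
Before cleaning up (step 3), you can work with the node's file system from the shell opened in step 2. The following is a minimal sketch; that the `busybox` image provides the `chroot` applet and that the node image ships `/bin/bash` are assumptions, not guaranteed by this change.

```sh
# Inspect node files directly through the /host prefix:
cat /host/etc/os-release

# Or switch into the node's file system to use paths as they appear on the node:
chroot /host /bin/bash
```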
|
||||||
|
|
|
@ -153,11 +153,6 @@ const sidebars = {
|
||||||
label: 'Verify your cluster',
|
label: 'Verify your cluster',
|
||||||
id: 'workflows/verify-cluster',
|
id: 'workflows/verify-cluster',
|
||||||
},
|
},
|
||||||
{
|
|
||||||
type: 'doc',
|
|
||||||
label: 'Manage SSH keys',
|
|
||||||
id: 'workflows/ssh',
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
type: 'doc',
|
type: 'doc',
|
||||||
label: 'Use persistent storage',
|
label: 'Use persistent storage',
|
||||||
|
|
|
@ -23,10 +23,7 @@ import (
|
||||||
"github.com/edgelesssys/constellation/v2/internal/cloud/metadata"
|
"github.com/edgelesssys/constellation/v2/internal/cloud/metadata"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var publicIPAddressRegexp = regexp.MustCompile(`/subscriptions/[^/]+/resourceGroups/[^/]+/providers/Microsoft.Network/publicIPAddresses/(?P<IPname>[^/]+)`)
|
||||||
publicIPAddressRegexp = regexp.MustCompile(`/subscriptions/[^/]+/resourceGroups/[^/]+/providers/Microsoft.Network/publicIPAddresses/(?P<IPname>[^/]+)`)
|
|
||||||
keyPathRegexp = regexp.MustCompile(`^\/home\/([^\/]+)\/\.ssh\/authorized_keys$`)
|
|
||||||
)
|
|
||||||
|
|
||||||
// Metadata implements azure metadata APIs.
|
// Metadata implements azure metadata APIs.
|
||||||
type Metadata struct {
|
type Metadata struct {
|
||||||
|
@ -364,22 +361,6 @@ func extractInstanceTags(tags map[string]*string) map[string]string {
|
||||||
return metadataMap
|
return metadataMap
|
||||||
}
|
}
|
||||||
|
|
||||||
// extractSSHKeys extracts SSH public keys from azure instance OS Profile.
|
|
||||||
func extractSSHKeys(sshConfig armcomputev2.SSHConfiguration) map[string][]string {
|
|
||||||
sshKeys := map[string][]string{}
|
|
||||||
for _, key := range sshConfig.PublicKeys {
|
|
||||||
if key == nil || key.Path == nil || key.KeyData == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
matches := keyPathRegexp.FindStringSubmatch(*key.Path)
|
|
||||||
if len(matches) != 2 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
sshKeys[matches[1]] = append(sshKeys[matches[1]], *key.KeyData)
|
|
||||||
}
|
|
||||||
return sshKeys
|
|
||||||
}
|
|
||||||
|
|
||||||
type cloudConfig struct {
|
type cloudConfig struct {
|
||||||
Cloud string `json:"cloud,omitempty"`
|
Cloud string `json:"cloud,omitempty"`
|
||||||
TenantID string `json:"tenantId,omitempty"`
|
TenantID string `json:"tenantId,omitempty"`
|
||||||
|
|
|
@ -28,7 +28,6 @@ func TestList(t *testing.T) {
|
||||||
ProviderID: "azure:///subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachineScaleSets/scale-set-name/virtualMachines/instance-id",
|
ProviderID: "azure:///subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachineScaleSets/scale-set-name/virtualMachines/instance-id",
|
||||||
Role: role.Worker,
|
Role: role.Worker,
|
||||||
VPCIP: "192.0.2.0",
|
VPCIP: "192.0.2.0",
|
||||||
SSHKeys: map[string][]string{"user": {"key-data"}},
|
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
testCases := map[string]struct {
|
testCases := map[string]struct {
|
||||||
|
@ -92,7 +91,6 @@ func TestSelf(t *testing.T) {
|
||||||
ProviderID: "azure:///subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachineScaleSets/scale-set-name/virtualMachines/instance-id",
|
ProviderID: "azure:///subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachineScaleSets/scale-set-name/virtualMachines/instance-id",
|
||||||
Role: role.Worker,
|
Role: role.Worker,
|
||||||
VPCIP: "192.0.2.0",
|
VPCIP: "192.0.2.0",
|
||||||
SSHKeys: map[string][]string{"user": {"key-data"}},
|
|
||||||
}
|
}
|
||||||
testCases := map[string]struct {
|
testCases := map[string]struct {
|
||||||
imdsAPI imdsAPI
|
imdsAPI imdsAPI
|
||||||
|
@ -561,70 +559,6 @@ func TestExtractInstanceTags(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestExtractSSHKeys(t *testing.T) {
|
|
||||||
testCases := map[string]struct {
|
|
||||||
in armcomputev2.SSHConfiguration
|
|
||||||
wantKeys map[string][]string
|
|
||||||
}{
|
|
||||||
"ssh key is extracted": {
|
|
||||||
in: armcomputev2.SSHConfiguration{
|
|
||||||
PublicKeys: []*armcomputev2.SSHPublicKey{
|
|
||||||
{
|
|
||||||
KeyData: to.Ptr("key-data"),
|
|
||||||
Path: to.Ptr("/home/user/.ssh/authorized_keys"),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
wantKeys: map[string][]string{"user": {"key-data"}},
|
|
||||||
},
|
|
||||||
"invalid path is skipped": {
|
|
||||||
in: armcomputev2.SSHConfiguration{
|
|
||||||
PublicKeys: []*armcomputev2.SSHPublicKey{
|
|
||||||
{
|
|
||||||
KeyData: to.Ptr("key-data"),
|
|
||||||
Path: to.Ptr("invalid-path"),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
wantKeys: map[string][]string{},
|
|
||||||
},
|
|
||||||
"key data is nil": {
|
|
||||||
in: armcomputev2.SSHConfiguration{
|
|
||||||
PublicKeys: []*armcomputev2.SSHPublicKey{
|
|
||||||
{
|
|
||||||
Path: to.Ptr("/home/user/.ssh/authorized_keys"),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
wantKeys: map[string][]string{},
|
|
||||||
},
|
|
||||||
"path is nil": {
|
|
||||||
in: armcomputev2.SSHConfiguration{
|
|
||||||
PublicKeys: []*armcomputev2.SSHPublicKey{
|
|
||||||
{
|
|
||||||
KeyData: to.Ptr("key-data"),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
wantKeys: map[string][]string{},
|
|
||||||
},
|
|
||||||
"public keys are nil": {
|
|
||||||
in: armcomputev2.SSHConfiguration{},
|
|
||||||
wantKeys: map[string][]string{},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for name, tc := range testCases {
|
|
||||||
t.Run(name, func(t *testing.T) {
|
|
||||||
assert := assert.New(t)
|
|
||||||
|
|
||||||
keys := extractSSHKeys(tc.in)
|
|
||||||
|
|
||||||
assert.Equal(tc.wantKeys, keys)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func newNetworkInterfacesStub() *stubNetworkInterfacesAPI {
|
func newNetworkInterfacesStub() *stubNetworkInterfacesAPI {
|
||||||
return &stubNetworkInterfacesAPI{
|
return &stubNetworkInterfacesAPI{
|
||||||
getInterface: armnetwork.Interface{
|
getInterface: armnetwork.Interface{
|
||||||
|
@ -673,16 +607,6 @@ func newVirtualMachineScaleSetsVMsStub() *stubVirtualMachineScaleSetVMsAPI {
|
||||||
},
|
},
|
||||||
OSProfile: &armcomputev2.OSProfile{
|
OSProfile: &armcomputev2.OSProfile{
|
||||||
ComputerName: to.Ptr("scale-set-name-instance-id"),
|
ComputerName: to.Ptr("scale-set-name-instance-id"),
|
||||||
LinuxConfiguration: &armcomputev2.LinuxConfiguration{
|
|
||||||
SSH: &armcomputev2.SSHConfiguration{
|
|
||||||
PublicKeys: []*armcomputev2.SSHPublicKey{
|
|
||||||
{
|
|
||||||
KeyData: to.Ptr("key-data"),
|
|
||||||
Path: to.Ptr("/home/user/.ssh/authorized_keys"),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
Tags: map[string]*string{
|
Tags: map[string]*string{
|
||||||
|
@ -706,16 +630,6 @@ func newVirtualMachineScaleSetsVMsStub() *stubVirtualMachineScaleSetVMsAPI {
|
||||||
},
|
},
|
||||||
OSProfile: &armcomputev2.OSProfile{
|
OSProfile: &armcomputev2.OSProfile{
|
||||||
ComputerName: to.Ptr("scale-set-name-instance-id"),
|
ComputerName: to.Ptr("scale-set-name-instance-id"),
|
||||||
LinuxConfiguration: &armcomputev2.LinuxConfiguration{
|
|
||||||
SSH: &armcomputev2.SSHConfiguration{
|
|
||||||
PublicKeys: []*armcomputev2.SSHPublicKey{
|
|
||||||
{
|
|
||||||
KeyData: to.Ptr("key-data"),
|
|
||||||
Path: to.Ptr("/home/user/.ssh/authorized_keys"),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
Tags: map[string]*string{
|
Tags: map[string]*string{
|
||||||
|
|
|
@ -87,12 +87,6 @@ func convertScaleSetVMToCoreInstance(vm armcomputev2.VirtualMachineScaleSetVM, n
|
||||||
if vm.Properties == nil || vm.Properties.OSProfile == nil || vm.Properties.OSProfile.ComputerName == nil {
|
if vm.Properties == nil || vm.Properties.OSProfile == nil || vm.Properties.OSProfile.ComputerName == nil {
|
||||||
return metadata.InstanceMetadata{}, errors.New("retrieving instance from armcompute API client returned no computer name")
|
return metadata.InstanceMetadata{}, errors.New("retrieving instance from armcompute API client returned no computer name")
|
||||||
}
|
}
|
||||||
var sshKeys map[string][]string
|
|
||||||
if vm.Properties.OSProfile.LinuxConfiguration == nil || vm.Properties.OSProfile.LinuxConfiguration.SSH == nil {
|
|
||||||
sshKeys = map[string][]string{}
|
|
||||||
} else {
|
|
||||||
sshKeys = extractSSHKeys(*vm.Properties.OSProfile.LinuxConfiguration.SSH)
|
|
||||||
}
|
|
||||||
|
|
||||||
if vm.Tags == nil {
|
if vm.Tags == nil {
|
||||||
return metadata.InstanceMetadata{}, errors.New("retrieving instance from armcompute API client returned no tags")
|
return metadata.InstanceMetadata{}, errors.New("retrieving instance from armcompute API client returned no tags")
|
||||||
|
@ -103,7 +97,6 @@ func convertScaleSetVMToCoreInstance(vm armcomputev2.VirtualMachineScaleSetVM, n
|
||||||
ProviderID: "azure://" + *vm.ID,
|
ProviderID: "azure://" + *vm.ID,
|
||||||
Role: extractScaleSetVMRole(vm.Tags),
|
Role: extractScaleSetVMRole(vm.Tags),
|
||||||
VPCIP: extractVPCIP(networkInterfaces),
|
VPCIP: extractVPCIP(networkInterfaces),
|
||||||
SSHKeys: sshKeys,
|
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -27,7 +27,6 @@ func TestGetScaleSetVM(t *testing.T) {
|
||||||
ProviderID: "azure:///subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachineScaleSets/scale-set-name/virtualMachines/instance-id",
|
ProviderID: "azure:///subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachineScaleSets/scale-set-name/virtualMachines/instance-id",
|
||||||
Role: role.Worker,
|
Role: role.Worker,
|
||||||
VPCIP: "192.0.2.0",
|
VPCIP: "192.0.2.0",
|
||||||
SSHKeys: map[string][]string{"user": {"key-data"}},
|
|
||||||
}
|
}
|
||||||
testCases := map[string]struct {
|
testCases := map[string]struct {
|
||||||
providerID string
|
providerID string
|
||||||
|
@ -87,7 +86,6 @@ func TestListScaleSetVMs(t *testing.T) {
|
||||||
ProviderID: "azure:///subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachineScaleSets/scale-set-name/virtualMachines/instance-id",
|
ProviderID: "azure:///subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachineScaleSets/scale-set-name/virtualMachines/instance-id",
|
||||||
Role: role.Worker,
|
Role: role.Worker,
|
||||||
VPCIP: "192.0.2.0",
|
VPCIP: "192.0.2.0",
|
||||||
SSHKeys: map[string][]string{"user": {"key-data"}},
|
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
testCases := map[string]struct {
|
testCases := map[string]struct {
|
||||||
|
@ -189,7 +187,6 @@ func TestConvertScaleSetVMToCoreInstance(t *testing.T) {
|
||||||
Name: "scale-set-name-instance-id",
|
Name: "scale-set-name-instance-id",
|
||||||
ProviderID: "azure:///subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachineScaleSets/scale-set-name/virtualMachines/instance-id",
|
ProviderID: "azure:///subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachineScaleSets/scale-set-name/virtualMachines/instance-id",
|
||||||
VPCIP: "192.0.2.0",
|
VPCIP: "192.0.2.0",
|
||||||
SSHKeys: map[string][]string{},
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
"invalid instance": {
|
"invalid instance": {
|
||||||
|
@ -290,16 +287,6 @@ func newListContainingNilScaleSetVirtualMachinesStub() *stubVirtualMachineScaleS
|
||||||
},
|
},
|
||||||
OSProfile: &armcomputev2.OSProfile{
|
OSProfile: &armcomputev2.OSProfile{
|
||||||
ComputerName: to.Ptr("scale-set-name-instance-id"),
|
ComputerName: to.Ptr("scale-set-name-instance-id"),
|
||||||
LinuxConfiguration: &armcomputev2.LinuxConfiguration{
|
|
||||||
SSH: &armcomputev2.SSHConfiguration{
|
|
||||||
PublicKeys: []*armcomputev2.SSHPublicKey{
|
|
||||||
{
|
|
||||||
KeyData: to.Ptr("key-data"),
|
|
||||||
Path: to.Ptr("/home/user/.ssh/authorized_keys"),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
|
|
@ -23,9 +23,7 @@ type InstanceMetadata struct {
|
||||||
Role role.Role
|
Role role.Role
|
||||||
// VPCIP is the primary IP address of the instance in the VPC.
|
// VPCIP is the primary IP address of the instance in the VPC.
|
||||||
VPCIP string
|
VPCIP string
|
||||||
// SSHKeys maps usernames to ssh public keys.
|
|
||||||
// TODO: remove everywhere.
|
|
||||||
SSHKeys map[string][]string
|
|
||||||
// SecondaryIPRange is the VPC wide CIDR from which subnets are attached to VMs as AliasIPRanges.
|
// SecondaryIPRange is the VPC wide CIDR from which subnets are attached to VMs as AliasIPRanges.
|
||||||
// May be empty on certain CSPs.
|
// May be empty on certain CSPs.
|
||||||
SecondaryIPRange string
|
SecondaryIPRange string
|
||||||
|
|
|
@ -55,7 +55,7 @@ type Config struct {
|
||||||
// Supported cloud providers and their specific configurations.
|
// Supported cloud providers and their specific configurations.
|
||||||
Provider ProviderConfig `yaml:"provider" validate:"dive"`
|
Provider ProviderConfig `yaml:"provider" validate:"dive"`
|
||||||
// description: |
|
// description: |
|
||||||
// Create SSH users on Constellation nodes.
|
// Deprecated: Does nothing! To get node SSH access, see: https://constellation-docs.edgeless.systems/constellation/workflows/troubleshooting#connect-to-nodes-via-ssh
|
||||||
// examples:
|
// examples:
|
||||||
// - value: '[]UserKey{ { Username: "Alice", PublicKey: "ssh-rsa AAAAB3NzaC...5QXHKW1rufgtJeSeJ8= alice@domain.com" } }'
|
// - value: '[]UserKey{ { Username: "Alice", PublicKey: "ssh-rsa AAAAB3NzaC...5QXHKW1rufgtJeSeJ8= alice@domain.com" } }'
|
||||||
SSHUsers []UserKey `yaml:"sshUsers,omitempty" validate:"dive"`
|
SSHUsers []UserKey `yaml:"sshUsers,omitempty" validate:"dive"`
|
||||||
|
@ -77,12 +77,20 @@ type UpgradeConfig struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// UserKey describes a user that should be created with corresponding public SSH key.
|
// UserKey describes a user that should be created with corresponding public SSH key.
|
||||||
|
//
|
||||||
|
// Deprecated: UserKey was used as configuration for access-manager, which was removed
|
||||||
|
// in v2.2, but config needs to retain these values for backwards compatibility and
|
||||||
|
// config validation.
|
||||||
type UserKey struct {
|
type UserKey struct {
|
||||||
// description: |
|
// description: |
|
||||||
// Username of new SSH user.
|
// Username of new SSH user.
|
||||||
|
//
|
||||||
|
// Deprecated: See UserKey.
|
||||||
Username string `yaml:"username" validate:"required"`
|
Username string `yaml:"username" validate:"required"`
|
||||||
// description: |
|
// description: |
|
||||||
// Public key of new SSH user.
|
// Public key of new SSH user.
|
||||||
|
//
|
||||||
|
// Deprecated: See UserKey.
|
||||||
PublicKey string `yaml:"publicKey" validate:"required"`
|
PublicKey string `yaml:"publicKey" validate:"required"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -54,8 +54,8 @@ func init() {
|
||||||
ConfigDoc.Fields[5].Name = "sshUsers"
|
ConfigDoc.Fields[5].Name = "sshUsers"
|
||||||
ConfigDoc.Fields[5].Type = "[]UserKey"
|
ConfigDoc.Fields[5].Type = "[]UserKey"
|
||||||
ConfigDoc.Fields[5].Note = ""
|
ConfigDoc.Fields[5].Note = ""
|
||||||
ConfigDoc.Fields[5].Description = "Create SSH users on Constellation nodes."
|
ConfigDoc.Fields[5].Description = "Deprecated: Does nothing! To get node SSH access, see: https://constellation-docs.edgeless.systems/constellation/workflows/troubleshooting#connect-to-nodes-via-ssh"
|
||||||
ConfigDoc.Fields[5].Comments[encoder.LineComment] = "Create SSH users on Constellation nodes."
|
ConfigDoc.Fields[5].Comments[encoder.LineComment] = "Deprecated: Does nothing! To get node SSH access, see: https://constellation-docs.edgeless.systems/constellation/workflows/troubleshooting#connect-to-nodes-via-ssh"
|
||||||
|
|
||||||
ConfigDoc.Fields[5].AddExample("", []UserKey{{Username: "Alice", PublicKey: "ssh-rsa AAAAB3NzaC...5QXHKW1rufgtJeSeJ8= alice@domain.com"}})
|
ConfigDoc.Fields[5].AddExample("", []UserKey{{Username: "Alice", PublicKey: "ssh-rsa AAAAB3NzaC...5QXHKW1rufgtJeSeJ8= alice@domain.com"}})
|
||||||
ConfigDoc.Fields[6].Name = "upgrade"
|
ConfigDoc.Fields[6].Name = "upgrade"
|
||||||
|
@ -91,7 +91,7 @@ func init() {
|
||||||
|
|
||||||
UserKeyDoc.Type = "UserKey"
|
UserKeyDoc.Type = "UserKey"
|
||||||
UserKeyDoc.Comments[encoder.LineComment] = "UserKey describes a user that should be created with corresponding public SSH key."
|
UserKeyDoc.Comments[encoder.LineComment] = "UserKey describes a user that should be created with corresponding public SSH key."
|
||||||
UserKeyDoc.Description = "UserKey describes a user that should be created with corresponding public SSH key."
|
UserKeyDoc.Description = "UserKey describes a user that should be created with corresponding public SSH key.\n\nDeprecated: UserKey was used as configuration for access-manager, which was removed\nin v2.2, but config needs to retain these values for backwards compatibility and\nconfig validation.\n"
|
||||||
|
|
||||||
UserKeyDoc.AddExample("", []UserKey{{Username: "Alice", PublicKey: "ssh-rsa AAAAB3NzaC...5QXHKW1rufgtJeSeJ8= alice@domain.com"}})
|
UserKeyDoc.AddExample("", []UserKey{{Username: "Alice", PublicKey: "ssh-rsa AAAAB3NzaC...5QXHKW1rufgtJeSeJ8= alice@domain.com"}})
|
||||||
UserKeyDoc.AppearsIn = []encoder.Appearance{
|
UserKeyDoc.AppearsIn = []encoder.Appearance{
|
||||||
|
@ -104,12 +104,12 @@ func init() {
|
||||||
UserKeyDoc.Fields[0].Name = "username"
|
UserKeyDoc.Fields[0].Name = "username"
|
||||||
UserKeyDoc.Fields[0].Type = "string"
|
UserKeyDoc.Fields[0].Type = "string"
|
||||||
UserKeyDoc.Fields[0].Note = ""
|
UserKeyDoc.Fields[0].Note = ""
|
||||||
UserKeyDoc.Fields[0].Description = "Username of new SSH user."
|
UserKeyDoc.Fields[0].Description = "Username of new SSH user.\n\nDeprecated: See UserKey."
|
||||||
UserKeyDoc.Fields[0].Comments[encoder.LineComment] = "Username of new SSH user."
|
UserKeyDoc.Fields[0].Comments[encoder.LineComment] = "Username of new SSH user."
|
||||||
UserKeyDoc.Fields[1].Name = "publicKey"
|
UserKeyDoc.Fields[1].Name = "publicKey"
|
||||||
UserKeyDoc.Fields[1].Type = "string"
|
UserKeyDoc.Fields[1].Type = "string"
|
||||||
UserKeyDoc.Fields[1].Note = ""
|
UserKeyDoc.Fields[1].Note = ""
|
||||||
UserKeyDoc.Fields[1].Description = "Public key of new SSH user."
|
UserKeyDoc.Fields[1].Description = "Public key of new SSH user.\n\nDeprecated: See UserKey."
|
||||||
UserKeyDoc.Fields[1].Comments[encoder.LineComment] = "Public key of new SSH user."
|
UserKeyDoc.Fields[1].Comments[encoder.LineComment] = "Public key of new SSH user."
|
||||||
|
|
||||||
ProviderConfigDoc.Type = "ProviderConfig"
|
ProviderConfigDoc.Type = "ProviderConfig"
|
||||||
|
@ -347,8 +347,8 @@ func init() {
|
||||||
GCPConfigDoc.Fields[8].Comments[encoder.LineComment] = "List of values that should be enforced to be equal to the ones from the measurement list. Any non-equal values not in this list will only result in a warning."
|
GCPConfigDoc.Fields[8].Comments[encoder.LineComment] = "List of values that should be enforced to be equal to the ones from the measurement list. Any non-equal values not in this list will only result in a warning."
|
||||||
|
|
||||||
QEMUConfigDoc.Type = "QEMUConfig"
|
QEMUConfigDoc.Type = "QEMUConfig"
|
||||||
QEMUConfigDoc.Comments[encoder.LineComment] = ""
|
QEMUConfigDoc.Comments[encoder.LineComment] = "QEMUConfig holds config information for QEMU based Constellation deployments."
|
||||||
QEMUConfigDoc.Description = ""
|
QEMUConfigDoc.Description = "QEMUConfig holds config information for QEMU based Constellation deployments."
|
||||||
QEMUConfigDoc.AppearsIn = []encoder.Appearance{
|
QEMUConfigDoc.AppearsIn = []encoder.Appearance{
|
||||||
{
|
{
|
||||||
TypeName: "ProviderConfig",
|
TypeName: "ProviderConfig",
|
||||||
|
|
|
@ -255,7 +255,6 @@ func TestConfigGeneratedDocsFresh(t *testing.T) {
|
||||||
|
|
||||||
assert.Len(ConfigDoc.Fields, reflect.ValueOf(Config{}).NumField(), updateMsg)
|
assert.Len(ConfigDoc.Fields, reflect.ValueOf(Config{}).NumField(), updateMsg)
|
||||||
assert.Len(UpgradeConfigDoc.Fields, reflect.ValueOf(UpgradeConfig{}).NumField(), updateMsg)
|
assert.Len(UpgradeConfigDoc.Fields, reflect.ValueOf(UpgradeConfig{}).NumField(), updateMsg)
|
||||||
assert.Len(UserKeyDoc.Fields, reflect.ValueOf(UserKey{}).NumField(), updateMsg)
|
|
||||||
assert.Len(ProviderConfigDoc.Fields, reflect.ValueOf(ProviderConfig{}).NumField(), updateMsg)
|
assert.Len(ProviderConfigDoc.Fields, reflect.ValueOf(ProviderConfig{}).NumField(), updateMsg)
|
||||||
assert.Len(AzureConfigDoc.Fields, reflect.ValueOf(AzureConfig{}).NumField(), updateMsg)
|
assert.Len(AzureConfigDoc.Fields, reflect.ValueOf(AzureConfig{}).NumField(), updateMsg)
|
||||||
assert.Len(GCPConfigDoc.Fields, reflect.ValueOf(GCPConfig{}).NumField(), updateMsg)
|
assert.Len(GCPConfigDoc.Fields, reflect.ValueOf(GCPConfig{}).NumField(), updateMsg)
|
||||||
|
|
|
@ -52,8 +52,6 @@ const (
|
||||||
KubernetesPort = 6443
|
KubernetesPort = 6443
|
||||||
// RecoveryPort port for Constellation recovery server.
|
// RecoveryPort port for Constellation recovery server.
|
||||||
RecoveryPort = 9999
|
RecoveryPort = 9999
|
||||||
// SSHPort port for SSH access.
|
|
||||||
SSHPort = 22
|
|
||||||
// DebugdPort port for debugd process.
|
// DebugdPort port for debugd process.
|
||||||
DebugdPort = 4000
|
DebugdPort = 4000
|
||||||
// KonnectivityPort port for konnectivity k8s service.
|
// KonnectivityPort port for konnectivity k8s service.
|
||||||
|
|
|
@ -1,50 +0,0 @@
|
||||||
/*
|
|
||||||
Copyright (c) Edgeless Systems GmbH
|
|
||||||
|
|
||||||
SPDX-License-Identifier: AGPL-3.0-only
|
|
||||||
*/
|
|
||||||
|
|
||||||
package ssh
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/edgelesssys/constellation/v2/bootstrapper/initproto"
|
|
||||||
)
|
|
||||||
|
|
||||||
// FromProtoSlice converts a SSH UserKey definition from pubproto to the Go flavor.
|
|
||||||
func FromProtoSlice(input []*initproto.SSHUserKey) []UserKey {
|
|
||||||
if input == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
output := make([]UserKey, 0)
|
|
||||||
|
|
||||||
for _, pair := range input {
|
|
||||||
singlePair := UserKey{
|
|
||||||
Username: pair.Username,
|
|
||||||
PublicKey: pair.PublicKey,
|
|
||||||
}
|
|
||||||
|
|
||||||
output = append(output, singlePair)
|
|
||||||
}
|
|
||||||
|
|
||||||
return output
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToProtoSlice converts a SSH UserKey definition from Go to pubproto flavor.
|
|
||||||
func ToProtoSlice(input []*UserKey) []*initproto.SSHUserKey {
|
|
||||||
if input == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
output := make([]*initproto.SSHUserKey, 0)
|
|
||||||
for _, pair := range input {
|
|
||||||
singlePair := initproto.SSHUserKey{
|
|
||||||
Username: pair.Username,
|
|
||||||
PublicKey: pair.PublicKey,
|
|
||||||
}
|
|
||||||
|
|
||||||
output = append(output, &singlePair)
|
|
||||||
}
|
|
||||||
|
|
||||||
return output
|
|
||||||
}
|
|
|
@ -1,42 +0,0 @@
|
||||||
/*
|
|
||||||
Copyright (c) Edgeless Systems GmbH
|
|
||||||
|
|
||||||
SPDX-License-Identifier: AGPL-3.0-only
|
|
||||||
*/
|
|
||||||
|
|
||||||
package ssh
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestToAndFromProtoSlice(t *testing.T) {
|
|
||||||
assert := assert.New(t)
|
|
||||||
|
|
||||||
DemoSSHUser1 := UserKey{
|
|
||||||
Username: "test-user-2",
|
|
||||||
PublicKey: "ssh-rsa abcdefg",
|
|
||||||
}
|
|
||||||
|
|
||||||
DemoSSHUser2 := UserKey{
|
|
||||||
Username: "test-user-2",
|
|
||||||
PublicKey: "ssh-rsa hijklmnop",
|
|
||||||
}
|
|
||||||
|
|
||||||
// Input usually consists of pointers (from config parsing)
|
|
||||||
DemoSSHUsersPointers := make([]*UserKey, 0)
|
|
||||||
DemoSSHUsersPointers = append(DemoSSHUsersPointers, &DemoSSHUser1)
|
|
||||||
DemoSSHUsersPointers = append(DemoSSHUsersPointers, &DemoSSHUser2)
|
|
||||||
|
|
||||||
// Expected output usually does not consist of pointers
|
|
||||||
DemoSSHUsersNoPointers := make([]UserKey, 0)
|
|
||||||
DemoSSHUsersNoPointers = append(DemoSSHUsersNoPointers, DemoSSHUser1)
|
|
||||||
DemoSSHUsersNoPointers = append(DemoSSHUsersNoPointers, DemoSSHUser2)
|
|
||||||
|
|
||||||
ToProtoArray := ToProtoSlice(DemoSSHUsersPointers)
|
|
||||||
FromProtoArray := FromProtoSlice(ToProtoArray)
|
|
||||||
|
|
||||||
assert.Equal(DemoSSHUsersNoPointers, FromProtoArray)
|
|
||||||
}
|
|
|
@ -1,114 +0,0 @@
/*
Copyright (c) Edgeless Systems GmbH

SPDX-License-Identifier: AGPL-3.0-only
*/

package ssh

import (
	"context"
	"fmt"
	"os"
	"sync"

	"github.com/edgelesssys/constellation/v2/internal/deploy/user"
	"github.com/edgelesssys/constellation/v2/internal/logger"
	"go.uber.org/zap"
)

// UserKey describes a user that should be created with a corresponding public SSH key.
type UserKey struct {
	Username  string
	PublicKey string
}

// Access reads SSH public keys from a channel, creates the specified users if required, and writes the public keys to the users' authorized_keys files.
type Access struct {
	log         *logger.Logger
	userManager user.LinuxUserManager
	authorized  map[UserKey]bool
	mux         sync.Mutex
}

// NewAccess creates a new Access.
func NewAccess(log *logger.Logger, userManager user.LinuxUserManager) *Access {
	return &Access{
		log:         log,
		userManager: userManager,
		authorized:  map[UserKey]bool{},
	}
}

// alreadyAuthorized checks if the key was written to authorized keys before.
func (s *Access) alreadyAuthorized(sshKey UserKey) bool {
	_, ok := s.authorized[sshKey]
	return ok
}

// rememberAuthorized marks this key as already written to authorized keys.
func (s *Access) rememberAuthorized(sshKey UserKey) {
	s.authorized[sshKey] = true
}

// GetAuthorizedKeys returns a list of all keys that were authorized so far.
func (s *Access) GetAuthorizedKeys() []UserKey {
	s.mux.Lock()
	defer s.mux.Unlock()

	var authorizedKeys []UserKey
	for key := range s.authorized {
		authorizedKeys = append(authorizedKeys, key)
	}

	return authorizedKeys
}

// DeployAuthorizedKey takes a user & public key pair, creates the user if required, and deploys an SSH key for them.
// TODO: Refactor to not write to /etc or /home.
func (s *Access) DeployAuthorizedKey(ctx context.Context, sshKey UserKey) error {
	// Allow only one thread to write to authorized keys, create users, and update the authorized map at a time.
	s.mux.Lock()
	defer s.mux.Unlock()
	if s.alreadyAuthorized(sshKey) {
		return nil
	}
	s.log.With(zap.String("username", sshKey.Username)).Infof("Trying to deploy ssh key for user")
	user, err := s.userManager.EnsureLinuxUserExists(ctx, sshKey.Username)
	if err != nil {
		return err
	}
	// CoreOS uses https://github.com/coreos/ssh-key-dir to search for ssh keys in ~/.ssh/authorized_keys.d/*
	sshFolder := fmt.Sprintf("%s/.ssh", user.Home)
	authorizedKeysD := fmt.Sprintf("%s/authorized_keys.d", sshFolder)
	if err := s.userManager.Fs.MkdirAll(authorizedKeysD, 0o700); err != nil {
		return err
	}
	if err := s.userManager.Fs.Chown(sshFolder, user.UID, user.GID); err != nil {
		return err
	}
	if err := s.userManager.Fs.Chown(authorizedKeysD, user.UID, user.GID); err != nil {
		return err
	}
	authorizedKeysPath := fmt.Sprintf("%s/constellation-ssh-keys", authorizedKeysD)
	authorizedKeysFile, err := s.userManager.Fs.OpenFile(authorizedKeysPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644)
	if err != nil {
		return err
	}
	_, err = authorizedKeysFile.WriteString(fmt.Sprintf("%s\n", sshKey.PublicKey))
	if err != nil {
		return err
	}
	if err := authorizedKeysFile.Close(); err != nil {
		return err
	}
	if err := s.userManager.Fs.Chown(authorizedKeysPath, user.UID, user.GID); err != nil {
		return err
	}
	if err := s.userManager.Fs.Chmod(authorizedKeysPath, 0o644); err != nil {
		return err
	}
	s.rememberAuthorized(sshKey)
	s.log.With(zap.String("username", sshKey.Username)).Infof("Successfully authorized user")
	return nil
}
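For orientation, the call sequence the doc comments describe (one `NewAccess`, then `DeployAuthorizedKey` per user/key pair, optionally `GetAuthorizedKeys`) can be sketched as a small test-style snippet. This is an illustration rather than code from the repository; it only reuses the fake constructors (`logger.NewTest`, `user.NewLinuxUserManagerFake`) that appear in the deleted test file below and mirrors that file's /etc/passwd setup.

```go
package ssh_test

import (
	"context"
	"testing"

	"github.com/edgelesssys/constellation/v2/internal/deploy/ssh"
	"github.com/edgelesssys/constellation/v2/internal/deploy/user"
	"github.com/edgelesssys/constellation/v2/internal/logger"
	"github.com/spf13/afero"
)

// TestAccessUsageSketch strings the public API together: create one Access,
// then deploy keys as they arrive.
func TestAccessUsageSketch(t *testing.T) {
	userManager := user.NewLinuxUserManagerFake(afero.NewMemMapFs())
	// The deleted tests seed an empty /etc/passwd before deploying; do the same here.
	if err := afero.WriteFile(userManager.Fs, "/etc/passwd", []byte(""), 0o755); err != nil {
		t.Fatal(err)
	}

	access := ssh.NewAccess(logger.NewTest(t), userManager)
	key := ssh.UserKey{Username: "myuser", PublicKey: "ssh-rsa AAAA..."}

	// First deployment creates the user (if needed) and appends the key.
	if err := access.DeployAuthorizedKey(context.Background(), key); err != nil {
		t.Fatal(err)
	}
	// A second deployment of the same pair is a no-op: the pair is remembered
	// in the authorized map, guarded by the mutex.
	if err := access.DeployAuthorizedKey(context.Background(), key); err != nil {
		t.Fatal(err)
	}

	if got := access.GetAuthorizedKeys(); len(got) != 1 {
		t.Fatalf("expected exactly one authorized key, got %d", len(got))
	}
}
```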
@ -1,149 +0,0 @@
/*
Copyright (c) Edgeless Systems GmbH

SPDX-License-Identifier: AGPL-3.0-only
*/

package ssh

import (
	"context"
	"sync"
	"testing"

	"github.com/edgelesssys/constellation/v2/internal/deploy/user"
	"github.com/edgelesssys/constellation/v2/internal/logger"
	"github.com/spf13/afero"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"go.uber.org/goleak"
)

func TestMain(m *testing.M) {
	goleak.VerifyTestMain(m)
}

func TestGetAuthorizedKeys(t *testing.T) {
	testCases := map[string]struct {
		authorized map[UserKey]bool
		want       []UserKey
	}{
		"success": {
			authorized: map[UserKey]bool{
				{Username: "user1", PublicKey: "ssh-rsa test1=="}: true,
				{Username: "user2", PublicKey: "ssh-rsa test2=="}: true,
			},
			want: []UserKey{
				{Username: "user1", PublicKey: "ssh-rsa test1=="},
				{Username: "user2", PublicKey: "ssh-rsa test2=="},
			},
		},
		"empty": {
			authorized: map[UserKey]bool{},
			want:       []UserKey(nil),
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)

			sshAccess := Access{authorized: tc.authorized}

			keys := sshAccess.GetAuthorizedKeys()
			assert.ElementsMatch(tc.want, keys)
		})
	}
}

func TestDeployAuthorizedKey(t *testing.T) {
	authorizedKey := UserKey{
		Username:  "user",
		PublicKey: "ssh-rsa testkey",
	}

	testCases := map[string]struct {
		fs               afero.Fs
		passwdContents   string
		alreadyDeployed  bool
		readonly         bool
		wantErr          bool
		wantFile         bool
		wantFileContents string
	}{
		"deploy works": {
			fs:               afero.NewMemMapFs(),
			wantErr:          false,
			wantFile:         true,
			wantFileContents: "ssh-rsa testkey\n",
		},
		"appending ssh key works": {
			fs:               memMapFsWithFile("/var/home/user/.ssh/authorized_keys.d/constellation-ssh-keys", "ssh-rsa preexistingkey\n"),
			wantErr:          false,
			wantFile:         true,
			wantFileContents: "ssh-rsa preexistingkey\nssh-rsa testkey\n",
		},
		"redeployment avoided": {
			fs:              afero.NewMemMapFs(),
			wantErr:         false,
			alreadyDeployed: true,
			wantFile:        false,
		},
		"readonly fs": {
			fs:       afero.NewMemMapFs(),
			readonly: true,
			wantErr:  true,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)
			require := require.New(t)
			userManager := user.NewLinuxUserManagerFake(tc.fs)

			assert.NoError(afero.WriteFile(userManager.Fs, "/etc/passwd", []byte(tc.passwdContents), 0o755))
			if tc.readonly {
				userManager.Fs = afero.NewReadOnlyFs(userManager.Fs)
			}
			authorized := map[UserKey]bool{}
			if tc.alreadyDeployed {
				authorized[UserKey{
					Username:  "user",
					PublicKey: "ssh-rsa testkey",
				}] = true
			}
			sshAccess := Access{
				log:         logger.NewTest(t),
				userManager: userManager,
				mux:         sync.Mutex{},
				authorized:  authorized,
			}
			err := sshAccess.DeployAuthorizedKey(context.Background(), authorizedKey)

			if tc.wantErr {
				assert.Error(err)
				return
			}
			require.NoError(err)
			if tc.wantFile {
				fileContents, err := afero.ReadFile(userManager.Fs, "/var/home/user/.ssh/authorized_keys.d/constellation-ssh-keys")
				assert.NoError(err)
				assert.Equal(tc.wantFileContents, string(fileContents))
			} else {
				exists, err := afero.Exists(userManager.Fs, "/var/home/user/.ssh/authorized_keys.d/constellation-ssh-keys")
				assert.NoError(err)
				assert.False(exists)
			}
		})
	}
}

func memMapFsWithFile(path string, contents string) afero.Fs {
	fs := afero.NewMemMapFs()
	err := afero.WriteFile(fs, path, []byte(contents), 0o755)
	if err != nil {
		panic(err)
	}
	return fs
}
@ -53,8 +53,6 @@ const (
	KonnectivityServerImage = "registry.k8s.io/kas-network-proxy/proxy-server:v0.0.33@sha256:2c111f004bec24888d8cfa2a812a38fb8341350abac67dcd0ac64e709dfe389c" // renovate:container
	// JoinImage image of Constellation join service.
	JoinImage = "ghcr.io/edgelesssys/constellation/join-service:v2.3.0-pre.0.20221109145754-0d12e37c9699@sha256:afe838fdf9753a6d50eef3a36a17d8993c1732397bb3f965dd25f291e7521acc" // renovate:container
	// AccessManagerImage image of Constellation access manager.
	AccessManagerImage = "ghcr.io/edgelesssys/constellation/access-manager:v2.3.0-pre.0.20221109145754-0d12e37c9699@sha256:9fe850517115851a0e0969401cc407dad2f2f9157aac86b69db51e28ee4559c4" // renovate:container
	// KmsImage image of Constellation KMS server.
	KmsImage = "ghcr.io/edgelesssys/constellation/kmsserver:v2.3.0-pre.0.20221109145754-0d12e37c9699@sha256:bed58eff5ca1ad2bb0eddfdbb642a5dc5454bfd6a0248487ae8e2756227e0e80" // renovate:container
	// VerificationImage image of Constellation verification service.
@ -174,7 +174,7 @@ we should add comments to those fields who will not update the cluster.
```yaml
kubernetesVersion: 1.24.3
kubernetesServicesVersion: 1.24.5 # Bundled Kubernetes components (Autoscaler, CloudControllerManager, CloudNodeManager, GCP Guest Agent, Konnectivity)
microserviceVersion: 2.2.0 # or constellationVersion: (KMS, AccessManager, JoinService, NodeMaintainanceOperator, NodeOperator, OLM, Verification, Cilium)
microserviceVersion: 2.2.0 # or constellationVersion: (KMS, JoinService, NodeMaintainanceOperator, NodeOperator, OLM, Verification, Cilium)
provider:
  azure:
    image: /communityGalleries/ConstellationCVM-b3782fa0-0df7-4f2f-963e-fc7fc42663df/images/constellation/versions/2.3.0