Replace logging with default logging interface (#233)

* Add test logger

* Refactor access manager logging

* Refactor activation service logging

* Refactor debugd logging

* Refactor kms server logging

* Refactor disk-mapper logging

Signed-off-by: Daniel Weiße <dw@edgeless.systems>
Daniel Weiße 2022-06-28 16:51:30 +02:00 committed by GitHub
parent e3f78a5bff
commit b10b13b173
42 changed files with 513 additions and 328 deletions
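The pattern is identical across all five services: ad-hoc stdlib `log`/`klog` calls are replaced by a shared `*logger.Logger` (a zap wrapper from `internal/logger`) that is constructed once in `main` and passed down explicitly. A minimal sketch of the resulting style, assembled only from calls that appear in this diff — the `internal/logger` package itself is not shown here, so the exact signatures are inferred from usage, and `doWork` is a hypothetical callee:

package main

import (
	"errors"

	"github.com/edgelesssys/constellation/internal/logger"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// doWork is a hypothetical subcomponent; it receives a named child logger
// instead of relying on a package-global log.
func doWork(log *logger.Logger) error {
	log.Infof("Working")
	return errors.New("not implemented")
}

func main() {
	// One logger per process: JSON output, level fixed at startup.
	log := logger.New(logger.JSONLog, zapcore.InfoLevel)

	// Context goes into structured fields rather than the format string.
	log.With(zap.String("version", "v0.0.0")).Infof("Service starting")

	// Errors are attached as zap fields; Fatalf replaces log.Panicf/klog.Exitf.
	if err := doWork(log.Named("worker")); err != nil {
		log.With(zap.Error(err)).Fatalf("Failed to do work")
	}
}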

View File

@@ -2,8 +2,8 @@ package main
 import (
 	"context"
+	"errors"
 	"fmt"
-	"log"
 	"os"
 	"path"
 	"path/filepath"
@@ -12,7 +12,10 @@ import (
 	"github.com/edgelesssys/constellation/internal/deploy/ssh"
 	"github.com/edgelesssys/constellation/internal/deploy/user"
+	"github.com/edgelesssys/constellation/internal/logger"
 	"github.com/spf13/afero"
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
 	v1 "k8s.io/api/core/v1"
 	v1Options "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -30,7 +33,7 @@ const (
 	// evictedHomePath holds the directory to which deleted user directories are moved to.
 	evictedHomePath = "/var/evicted"
-	// relativePathToSSHKeys holds the path inside an user's directory to the SSH keys.
+	// relativePathToSSHKeys holds the path inside a user's directory to the SSH keys.
 	// Needs to be in sync with internal/deploy/ssh.go.
 	relativePathToSSHKeys = ".ssh/authorized_keys.d/constellation-ssh-keys"
@@ -45,33 +48,35 @@ type uidGIDPair struct {
 }
 func main() {
+	log := logger.New(logger.JSONLog, zapcore.InfoLevel)
 	hostname, err := os.Hostname()
 	if err != nil {
-		log.Println("Starting constellation-access-manager as unknown pod.")
+		log.Warnf("Starting constellation-access-manager as unknown pod")
 	} else {
-		log.Println("Starting constellation-access-manager as", hostname)
+		log.Infof("Starting constellation-access-manager as %q", hostname)
 	}
 	// Retrieve configMap from Kubernetes API before we chroot into the host filesystem.
-	configMap, err := retrieveConfigMap()
+	configMap, err := retrieveConfigMap(log)
 	if err != nil {
-		log.Panicf("Failed to retrieve ConfigMap from Kubernetes API: %v", err)
+		log.With(zap.Error(err)).Fatalf("Failed to retrieve ConfigMap from Kubernetes API")
 	}
 	// Chroot into main system
 	if err := syscall.Chroot(hostPath); err != nil {
-		log.Panicf("Failed to chroot into host filesystem: %v", err)
+		log.With(zap.Error(err)).Fatalf("Failed to chroot into host filesystem")
 	}
 	if err := syscall.Chdir("/"); err != nil {
-		log.Panicf("Failed to chdir into host filesystem: %v", err)
+		log.With(zap.Error(err)).Fatalf("Failed to chdir into host filesystem")
 	}
 	fs := afero.NewOsFs()
 	linuxUserManager := user.NewLinuxUserManager(fs)
-	if err := run(fs, linuxUserManager, configMap); err != nil {
+	if err := run(log, fs, linuxUserManager, configMap); err != nil {
 		// So far there is only one error path in this code, and this is getting the user directories... So just make the error specific here for now.
-		log.Panicf("Failed to retrieve existing user directories: %v", err)
+		log.With(zap.Error(err)).Fatalf("Failed to retrieve existing user directories")
 	}
 }
@@ -92,21 +97,28 @@ func loadClientSet() (*kubernetes.Clientset, error) {
 }
 // deployKeys creates or evicts users based on the ConfigMap and deploy their SSH keys.
-func deployKeys(ctx context.Context, configMap *v1.ConfigMap, fs afero.Fs, linuxUserManager user.LinuxUserManager, userMap map[string]uidGIDPair, sshAccess *ssh.Access) {
+func deployKeys(
+	ctx context.Context, log *logger.Logger, configMap *v1.ConfigMap, fs afero.Fs,
+	linuxUserManager user.LinuxUserManager, userMap map[string]uidGIDPair, sshAccess *ssh.Access,
+) {
 	// If no ConfigMap exists or has been emptied, evict all users and exit.
 	if configMap == nil || len(configMap.Data) == 0 {
 		for username, ownership := range userMap {
+			log := log.With(zap.String("username", username))
 			if username != "root" {
 				evictedUserPath := path.Join(evictedHomePath, username)
-				log.Printf("Evicting '%s' with previous UID '%d' and GID '%d' to %s.\n", username, ownership.UID, ownership.GID, evictedUserPath)
+				log.With(zap.Uint32("UID", ownership.UID), zap.Uint32("GID", ownership.GID)).
+					Infof("Evicting user to %q", evictedUserPath)
 				if err := evictUser(username, fs, linuxUserManager); err != nil {
-					log.Printf("Did not evict '%s': %v\n", username, err)
+					log.With(zap.Error(err)).Errorf("Did not evict user")
 					continue
 				}
 			} else {
+				log.Infof("Removing any old keys for 'root', if existent")
 				// Remove root's SSH key specifically instead of evicting the whole directory.
 				if err := evictRootKey(fs, linuxUserManager); err != nil && !os.IsNotExist(err) {
-					log.Printf("Failed to remove previously existing root key: %v\n", err)
+					log.With(zap.Error(err)).Errorf("Failed to remove previously existing root key")
 					continue
 				}
 			}
@@ -118,25 +130,36 @@ func deployKeys(ctx context.Context, configMap *v1.ConfigMap, fs afero.Fs, linux
 	// First, recreate users that already existed, if they are defined in the configMap.
 	// For users which do not exist, we move their user directories to avoid accidental takeovers but also loss of data.
 	for username, ownership := range userMap {
+		log := log.With(zap.String("username", username))
 		if username != "root" {
 			if _, ok := configMap.Data[username]; ok {
-				log.Printf("Recreating '%s' with UID %d and GID %d, if not existent.\n", username, ownership.UID, ownership.GID)
-				if err := linuxUserManager.Creator.CreateUserWithSpecificUIDAndGID(ctx, username, int(ownership.UID), int(ownership.GID)); err != nil {
-					log.Printf("Did not recreate '%s': %v\n", username, err)
+				log.With(zap.Uint32("UID", ownership.UID), zap.Uint32("GID", ownership.GID)).
+					Infof("Recreating user, if not existent")
+				if err := linuxUserManager.Creator.CreateUserWithSpecificUIDAndGID(
+					ctx, username, int(ownership.UID), int(ownership.GID),
+				); err != nil {
+					if errors.Is(err, user.ErrUserOrGroupAlreadyExists) {
+						log.Infof("User already exists, skipping")
+					} else {
+						log.With(zap.Error(err)).Errorf("Failed to recreate user")
+					}
 					continue
 				}
 			} else {
 				evictedUserPath := path.Join(evictedHomePath, username)
-				log.Printf("Evicting '%s' with previous UID '%d' and GID '%d' to %s.\n", username, ownership.UID, ownership.GID, evictedUserPath)
+				log.With(zap.Uint32("UID", ownership.UID), zap.Uint32("GID", ownership.GID)).
+					Infof("Evicting user to %q", evictedUserPath)
 				if err := evictUser(username, fs, linuxUserManager); err != nil {
-					log.Printf("Did not to evict '%s': %v\n", username, err)
+					log.With(zap.Error(err)).Errorf("Did not evict user")
 					continue
 				}
 			}
 		} else {
+			log.Infof("Removing any old keys for 'root', if existent")
 			// Always remove the root key first, even if it is about to be redeployed.
 			if err := evictRootKey(fs, linuxUserManager); err != nil && !os.IsNotExist(err) {
-				log.Printf("Failed to remove previously existing root key: %v\n", err)
+				log.With(zap.Error(err)).Errorf("Failed to remove previously existing root key")
 				continue
 			}
 		}
@@ -144,25 +167,30 @@ func deployKeys(ctx context.Context, configMap *v1.ConfigMap, fs afero.Fs, linux
 	// Then, create the remaining users from the configMap (if remaining) and deploy SSH keys for all users.
 	for username, publicKey := range configMap.Data {
+		log := log.With(zap.String("username", username))
 		if _, ok := userMap[username]; !ok {
-			log.Printf("Creating user '%s'\n", username)
+			log.Infof("Creating user")
 			if err := linuxUserManager.Creator.CreateUser(ctx, username); err != nil {
-				log.Printf("Failed to create '%s': %v\n", username, err)
+				if errors.Is(err, user.ErrUserOrGroupAlreadyExists) {
+					log.Infof("User already exists, skipping")
+				} else {
+					log.With(zap.Error(err)).Errorf("Failed to create user")
+				}
 				continue
 			}
 		}
-		// If we created an user, let's actually get the home directory instead of assuming it's the same as the normal home directory.
+		// If we created a user, let's actually get the home directory instead of assuming it's the same as the normal home directory.
 		user, err := linuxUserManager.GetLinuxUser(username)
 		if err != nil {
-			log.Printf("Failed to retrieve information about user '%s': %v\n", username, err)
+			log.With(zap.Error(err)).Errorf("Failed to retrieve information about user")
 			continue
 		}
 		// Delete already deployed keys
 		pathToSSHKeys := filepath.Join(user.Home, relativePathToSSHKeys)
 		if err := fs.Remove(pathToSSHKeys); err != nil && !os.IsNotExist(err) {
-			log.Printf("Failed to delete remaining managed SSH keys for '%s': %v\n", username, err)
+			log.With(zap.Error(err)).Errorf("Failed to delete remaining managed SSH keys for user")
 			continue
 		}
@@ -172,15 +200,15 @@ func deployKeys(ctx context.Context, configMap *v1.ConfigMap, fs afero.Fs, linux
 			PublicKey: publicKey,
 		}
-		log.Printf("Deploying new SSH key for '%s'.\n", username)
+		log.Infof("Deploying new SSH key for user")
 		if err := sshAccess.DeployAuthorizedKey(context.Background(), newKey); err != nil {
-			log.Printf("Failed to deploy SSH keys for '%s': %v\n", username, err)
+			log.With(zap.Error(err)).Errorf("Failed to deploy SSH keys for user")
 			continue
 		}
 	}
 }
-// evictUser moves an user directory to evictedPath and changes their owner recursive to root.
+// evictUser moves a user directory to evictedPath and changes their owner recursive to root.
 func evictUser(username string, fs afero.Fs, linuxUserManager user.LinuxUserManager) error {
 	if _, err := linuxUserManager.GetLinuxUser(username); err == nil {
 		return fmt.Errorf("user '%s' still seems to exist", username)
@@ -219,7 +247,6 @@ func evictUser(username string, fs afero.Fs, linuxUserManager user.LinuxUserMana
 // evictRootKey removes the root key from the filesystem, instead of evicting the whole user directory.
 func evictRootKey(fs afero.Fs, linuxUserManager user.LinuxUserManager) error {
-	log.Println("Removing any old keys for 'root', if existent.")
 	user, err := linuxUserManager.GetLinuxUser("root")
 	if err != nil {
 		return err
@@ -235,9 +262,9 @@ func evictRootKey(fs afero.Fs, linuxUserManager user.LinuxUserManager) error {
 }
 // retrieveConfigMap contacts the Kubernetes API server and retrieves the ssh-users ConfigMap.
-func retrieveConfigMap() (*v1.ConfigMap, error) {
+func retrieveConfigMap(log *logger.Logger) (*v1.ConfigMap, error) {
 	// Authenticate with the Kubernetes API and get the information from the ssh-users ConfigMap to recreate the users we need.
-	log.Println("Authenticating with Kubernetes...")
+	log.Infof("Authenticating with Kubernetes...")
 	clientset, err := loadClientSet()
 	if err != nil {
 		return nil, err
@@ -246,7 +273,7 @@ func retrieveConfigMap() (*v1.ConfigMap, error) {
 	ctx, cancel := context.WithTimeout(context.Background(), timeout)
 	defer cancel()
-	log.Println("Requesting 'ssh-users' ConfigMap...")
+	log.Infof("Requesting 'ssh-users' ConfigMap...")
 	configmap, err := clientset.CoreV1().ConfigMaps("kube-system").Get(ctx, "ssh-users", v1Options.GetOptions{})
 	if err != nil {
 		return nil, err
@@ -256,7 +283,7 @@ func retrieveConfigMap() (*v1.ConfigMap, error) {
 }
 // generateUserMap iterates the list of existing home directories to create a map of previously existing usernames to their previous respective UID and GID.
-func generateUserMap(fs afero.Fs) (map[string]uidGIDPair, error) {
+func generateUserMap(log *logger.Logger, fs afero.Fs) (map[string]uidGIDPair, error) {
 	// Go through the normalHomePath directory, and create a mapping of existing user names in combination with their owner's UID & GID.
 	// We use this information later to create missing users under the same UID and GID to avoid breakage.
 	fileInfo, err := afero.ReadDir(fs, normalHomePath)
@@ -268,12 +295,14 @@ func generateUserMap(fs afero.Fs) (map[string]uidGIDPair, error) {
 	userMap["root"] = uidGIDPair{UID: 0, GID: 0}
 	// This will fail under MemMapFS, since it's not UNIX-compatible.
 	for _, singleInfo := range fileInfo {
+		log := log.With(zap.String("username", singleInfo.Name()))
 		// Fail gracefully instead of hard.
 		if stat, ok := singleInfo.Sys().(*syscall.Stat_t); ok {
 			userMap[singleInfo.Name()] = uidGIDPair{UID: stat.Uid, GID: stat.Gid}
-			log.Printf("Found home directory for '%s' (%d:%d).\n", singleInfo.Name(), stat.Uid, stat.Gid)
+			log.With(zap.Uint32("UID", stat.Uid), zap.Uint32("GID", stat.Gid)).
+				Infof("Found home directory for user")
 		} else {
-			log.Printf("WARNING: Failed to retrieve UNIX stat for %s. User will not be evicted, or if this directory belongs to an user that is to be created later, it might be created under a different UID/GID than before.\n", singleInfo.Name())
+			log.Warnf("Failed to retrieve UNIX stat for user. User will not be evicted, or if this directory belongs to a user that is to be created later, it might be created under a different UID/GID than before")
 			continue
 		}
 	}
@@ -281,17 +310,17 @@ func generateUserMap(log *logger.Logger, fs afero.Fs) (map[string]uidGIDPair, error) {
 	return userMap, nil
 }
-func run(fs afero.Fs, linuxUserManager user.LinuxUserManager, configMap *v1.ConfigMap) error {
-	sshAccess := ssh.NewAccess(linuxUserManager)
+func run(log *logger.Logger, fs afero.Fs, linuxUserManager user.LinuxUserManager, configMap *v1.ConfigMap) error {
+	sshAccess := ssh.NewAccess(log, linuxUserManager)
 	// Generate userMap containing existing user directories and their ownership
-	userMap, err := generateUserMap(fs)
+	userMap, err := generateUserMap(log, fs)
 	if err != nil {
 		return err
 	}
 	// Try to deploy keys based on configmap.
-	deployKeys(context.Background(), configMap, fs, linuxUserManager, userMap, sshAccess)
+	deployKeys(context.Background(), log, configMap, fs, linuxUserManager, userMap, sshAccess)
 	return nil
 }
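A detail worth noting in the loops above: the logger is shadowed per iteration (`log := log.With(zap.String("username", username))`), so every message emitted for that user automatically carries the username field without repeating it in each call. A small sketch of the idiom — `processAll` is made up for illustration, only the shadowing pattern is taken from the diff:

// processAll illustrates the per-iteration field scoping used by deployKeys.
func processAll(log *logger.Logger, userMap map[string]uidGIDPair) {
	for username, ownership := range userMap {
		// The shadowed logger carries the username for this iteration only;
		// the outer logger is untouched.
		log := log.With(zap.String("username", username))
		log.With(zap.Uint32("UID", ownership.UID), zap.Uint32("GID", ownership.GID)).
			Infof("Processing user")
	}
}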

View File

@@ -11,6 +11,7 @@ import (
 	"github.com/edgelesssys/constellation/internal/deploy/ssh"
 	"github.com/edgelesssys/constellation/internal/deploy/user"
+	"github.com/edgelesssys/constellation/internal/logger"
 	"github.com/spf13/afero"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -164,86 +165,89 @@ func TestDeployKeys(t *testing.T) {
 		},
 	}
-	for _, tc := range testCases {
+	for name, tc := range testCases {
+		t.Run(name, func(t *testing.T) {
 			fs := afero.NewMemMapFs()
 			require.NoError(fs.MkdirAll(normalHomePath, 0o700))
 			require.NoError(fs.Mkdir("/etc", 0o644))
 			_, err := fs.Create("/etc/passwd")
 			require.NoError(err)
 			// Create fake user directories
 			for user := range tc.existingUsers {
 				userHomePath := path.Join(normalHomePath, user)
 				err := fs.MkdirAll(userHomePath, 0o700)
 				require.NoError(err)
 				require.NoError(fs.Chown(userHomePath, int(tc.existingUsers[user].UID), int(tc.existingUsers[user].GID)))
 			}
+			log := logger.NewTest(t)
 			linuxUserManager := user.NewLinuxUserManagerFake(fs)
-			sshAccess := ssh.NewAccess(linuxUserManager)
-			deployKeys(context.Background(), tc.configMap, fs, linuxUserManager, tc.existingUsers, sshAccess)
+			sshAccess := ssh.NewAccess(log, linuxUserManager)
+			deployKeys(context.Background(), log, tc.configMap, fs, linuxUserManager, tc.existingUsers, sshAccess)
-			// Unfourtunaly, we cannot retrieve the UID/GID from afero's MemMapFs without weird hacks,
+			// Unfortunately, we cannot retrieve the UID/GID from afero's MemMapFs without weird hacks,
 			// as it does not have getters and it is not exported.
 			if tc.configMap != nil && tc.existingUsers != nil {
 				// Parse /etc/passwd and check for users
 				passwdEntries, err := linuxUserManager.Passwd.Parse(fs)
 				require.NoError(err)
 				// Check recreation or deletion
 				for user := range tc.existingUsers {
 					if _, ok := tc.configMap.Data[user]; ok {
 						checkHomeDirectory(user, fs, assert, true)
 						// Check if user exists in /etc/passwd
 						userEntry, ok := passwdEntries[user]
 						assert.True(ok)
 						// Check if user has been recreated with correct UID/GID
 						actualUID, err := strconv.Atoi(userEntry.Uid)
 						assert.NoError(err)
 						assert.EqualValues(tc.existingUsers[user].UID, actualUID)
 						actualGID, err := strconv.Atoi(userEntry.Gid)
 						assert.NoError(err)
 						assert.EqualValues(tc.existingUsers[user].GID, actualGID)
 						// Check if the user has the right keys
 						checkSSHKeys(user, fs, assert, tc.configMap.Data[user]+"\n")
 					} else {
 						// Check if home directory is not available anymore under the regular path
 						checkHomeDirectory(user, fs, assert, false)
 						// Check if home directory has been evicted
 						homeDirs, err := afero.ReadDir(fs, evictedHomePath)
 						require.NoError(err)
 						var userDirectoryName string
 						for _, singleDir := range homeDirs {
 							if strings.Contains(singleDir.Name(), user+"_") {
 								userDirectoryName = singleDir.Name()
 								break
 							}
 						}
 						assert.NotEmpty(userDirectoryName)
 						// Check if user does not exist in /etc/passwd
 						_, ok := passwdEntries[user]
 						assert.False(ok)
 					}
 				}
 				// Check creation of new users
 				for user := range tc.configMap.Data {
 					// We already checked recreated or evicted users, so skip them.
 					if _, ok := tc.existingUsers[user]; ok {
 						continue
 					}
 					checkHomeDirectory(user, fs, assert, true)
 					checkSSHKeys(user, fs, assert, tc.configMap.Data[user]+"\n")
 				}
 			}
+		})
 	}
 }
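The tests switch to `logger.NewTest(t)`, the test logger added by this commit; presumably it routes log output through `testing.T`, so messages are attributed to the subtest that emitted them and only shown for failures or verbose runs. Wrapping each case in `t.Run`, as the rewritten loop above does, is what makes that attribution work. A hypothetical table-driven skeleton showing the combination:

package example

import (
	"testing"

	"github.com/edgelesssys/constellation/internal/logger"
)

func TestExample(t *testing.T) {
	testCases := map[string]struct{ input string }{
		"first case": {input: "a"},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			// NewTest binds log output to this subtest.
			log := logger.NewTest(t)
			log.Infof("Running with input %q", tc.input)
		})
	}
}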

View File

@@ -15,52 +15,61 @@ import (
 	"github.com/edgelesssys/constellation/internal/constants"
 	"github.com/edgelesssys/constellation/internal/file"
 	"github.com/edgelesssys/constellation/internal/grpc/atlscredentials"
+	"github.com/edgelesssys/constellation/internal/logger"
 	"github.com/spf13/afero"
-	"k8s.io/klog/v2"
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
 )
 func main() {
 	provider := flag.String("cloud-provider", "", "cloud service provider this binary is running on")
 	kmsEndpoint := flag.String("kms-endpoint", "", "endpoint of Constellations key management service")
+	verbosity := flag.Int("v", 0, "log verbosity in zap logging levels. Use -1 for debug information, 0 for info, 1 for warn, 2 for error")
-	klog.InitFlags(nil)
 	flag.Parse()
-	defer klog.Flush()
+	log := logger.New(logger.JSONLog, zapcore.Level(*verbosity))
-	klog.V(2).Infof("\nConstellation Node Activation Service\nVersion: %s\nRunning on: %s", constants.VersionInfo, *provider)
+	log.With(zap.String("version", constants.VersionInfo), zap.String("cloudProvider", *provider)).
+		Infof("Constellation Node Activation Service")
 	handler := file.NewHandler(afero.NewOsFs())
-	validator, err := validator.New(*provider, handler)
+	validator, err := validator.New(log.Named("validator"), *provider, handler)
 	if err != nil {
 		flag.Usage()
-		klog.Exitf("failed to create validator: %s", err)
+		log.With(zap.Error(err)).Fatalf("Failed to create validator")
 	}
 	creds := atlscredentials.New(nil, []atls.Validator{validator})
-	kubeadm, err := kubeadm.New()
+	kubeadm, err := kubeadm.New(log.Named("kubeadm"))
 	if err != nil {
-		klog.Exitf("failed to create kubeadm: %s", err)
+		log.With(zap.Error(err)).Fatalf("Failed to create kubeadm")
 	}
-	kms := kms.New(*kmsEndpoint)
+	kms := kms.New(log.Named("kms"), *kmsEndpoint)
-	server := server.New(handler, kubernetesca.New(handler), kubeadm, kms)
+	server := server.New(
+		log.Named("server"),
+		handler,
+		kubernetesca.New(log.Named("certificateAuthority"), handler),
+		kubeadm,
+		kms,
+	)
-	watcher, err := watcher.New(validator)
+	watcher, err := watcher.New(log.Named("fileWatcher"), validator)
 	if err != nil {
-		klog.Exitf("failed to create watcher for measurements updates: %s", err)
+		log.With(zap.Error(err)).Fatalf("Failed to create watcher for measurements updates")
 	}
 	defer watcher.Close()
 	go func() {
-		klog.V(4).Infof("starting file watcher for measurements file %s", filepath.Join(constants.ActivationBasePath, constants.ActivationMeasurementsFilename))
+		log.Infof("starting file watcher for measurements file %s", filepath.Join(constants.ActivationBasePath, constants.ActivationMeasurementsFilename))
 		if err := watcher.Watch(filepath.Join(constants.ActivationBasePath, constants.ActivationMeasurementsFilename)); err != nil {
-			klog.Exitf("failed to watch measurements file: %s", err)
+			log.With(zap.Error(err)).Fatalf("Failed to watch measurements file")
 		}
 	}()
 	if err := server.Run(creds, strconv.Itoa(constants.ActivationServicePort)); err != nil {
-		klog.Exitf("failed to run server: %s", err)
+		log.With(zap.Error(err)).Fatalf("Failed to run server")
 	}
 }
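The new `-v` flag feeds straight into zap's numeric level scale, where lower numbers are more verbose: `zapcore.DebugLevel` is -1, `InfoLevel` 0, `WarnLevel` 1, `ErrorLevel` 2, which is exactly what the flag's help text describes. A condensed sketch of the mapping (assuming `logger.New` accepts any `zapcore.Level`, as the code above suggests):

package main

import (
	"flag"

	"github.com/edgelesssys/constellation/internal/logger"
	"go.uber.org/zap/zapcore"
)

func main() {
	verbosity := flag.Int("v", 0, "log verbosity in zap logging levels. Use -1 for debug information, 0 for info, 1 for warn, 2 for error")
	flag.Parse()

	// zap levels are ordered numerically (-1 debug, 0 info, 1 warn, 2 error),
	// so the flag value can be cast directly to a zapcore.Level.
	log := logger.New(logger.JSONLog, zapcore.Level(*verbosity))
	log.Debugf("Only visible with -v=-1")
	log.Infof("Visible at the default level")
}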

View File

@@ -4,21 +4,24 @@ import (
 	"context"
 	"fmt"
+	"github.com/edgelesssys/constellation/internal/logger"
 	"github.com/edgelesssys/constellation/kms/server/kmsapi/kmsproto"
+	"go.uber.org/zap"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/credentials/insecure"
-	"k8s.io/klog/v2"
 )
 // Client interacts with Constellation's key management service.
 type Client struct {
+	log      *logger.Logger
 	endpoint string
 	grpc     grpcClient
 }
 // New creates a new KMS.
-func New(endpoint string) Client {
+func New(log *logger.Logger, endpoint string) Client {
 	return Client{
+		log:      log,
 		endpoint: endpoint,
 		grpc:     client{},
 	}
@@ -26,15 +29,17 @@ func New(endpoint string) Client {
 // GetDEK returns a data encryption key for the given UUID.
 func (c Client) GetDataKey(ctx context.Context, uuid string, length int) ([]byte, error) {
+	log := c.log.With(zap.String("diskUUID", uuid), zap.String("endpoint", c.endpoint))
 	// TODO: update credentials if we enable aTLS on the KMS
 	// For now this is fine since traffic is only routed through the Constellation cluster
+	log.Infof("Connecting to KMS at %s", c.endpoint)
 	conn, err := grpc.DialContext(ctx, c.endpoint, grpc.WithTransportCredentials(insecure.NewCredentials()))
 	if err != nil {
 		return nil, err
 	}
 	defer conn.Close()
-	klog.V(6).Infof("GetDataKey: connecting to KMS at %s", c.endpoint)
+	log.Infof("Requesting data key")
 	res, err := c.grpc.GetDataKey(
 		ctx,
 		&kmsproto.GetDataKeyRequest{
@@ -47,6 +52,7 @@ func (c Client) GetDataKey(ctx context.Context, uuid string, length int) ([]byte
 		return nil, fmt.Errorf("fetching data encryption key from Constellation KMS: %w", err)
 	}
+	log.Infof("Data key request successful")
 	return res.DataKey, nil
 }
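From the caller's perspective (see the `server.New` wiring in the activation service above), the client is handed a named child logger at construction and reports its own progress, so call sites only deal with the returned key or error. A hypothetical invocation — the endpoint, UUID, and key length here are placeholders, not values from this commit:

// fetchDiskKey is a made-up call site for the client shown above.
func fetchDiskKey(ctx context.Context, log *logger.Logger) ([]byte, error) {
	c := kms.New(log.Named("kms"), "kms.kube-system:9000")
	key, err := c.GetDataKey(ctx, "00000000-0000-0000-0000-000000000000", 32)
	if err != nil {
		log.With(zap.Error(err)).Errorf("Failed to fetch data key")
		return nil, err
	}
	return key, nil // the requested data encryption key
}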

View File

@@ -5,6 +5,7 @@ import (
 	"errors"
 	"testing"
+	"github.com/edgelesssys/constellation/internal/logger"
 	"github.com/edgelesssys/constellation/kms/server/kmsapi/kmsproto"
 	"github.com/stretchr/testify/assert"
 	"google.golang.org/grpc"
@@ -41,7 +42,10 @@ func TestGetDataKey(t *testing.T) {
 			listener := bufconn.Listen(1)
 			defer listener.Close()
-			client := New(listener.Addr().String())
+			client := New(
+				logger.NewTest(t),
+				listener.Addr().String(),
+			)
 			client.grpc = tc.client

View File

@@ -7,6 +7,7 @@ import (
 	"github.com/edgelesssys/constellation/internal/constants"
 	"github.com/edgelesssys/constellation/internal/file"
+	"github.com/edgelesssys/constellation/internal/logger"
 	"github.com/spf13/afero"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	clientset "k8s.io/client-go/kubernetes"
@@ -14,7 +15,6 @@ import (
 	"k8s.io/client-go/tools/clientcmd"
 	certutil "k8s.io/client-go/util/cert"
 	bootstraputil "k8s.io/cluster-bootstrap/token/util"
-	"k8s.io/klog/v2"
 	bootstraptoken "k8s.io/kubernetes/cmd/kubeadm/app/apis/bootstraptoken/v1"
 	kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
 	kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
@@ -26,12 +26,13 @@ import (
 // Kubeadm manages joining of new nodes.
 type Kubeadm struct {
+	log    *logger.Logger
 	client clientset.Interface
 	file   file.Handler
 }
 // New creates a new Kubeadm instance.
-func New() (*Kubeadm, error) {
+func New(log *logger.Logger) (*Kubeadm, error) {
 	config, err := rest.InClusterConfig()
 	if err != nil {
 		return nil, fmt.Errorf("failed to get in-cluster config: %w", err)
@@ -43,6 +44,7 @@ func New() (*Kubeadm, error) {
 	file := file.NewHandler(afero.NewOsFs())
 	return &Kubeadm{
+		log:    log,
 		client: client,
 		file:   file,
 	}, nil
@@ -50,7 +52,7 @@ func New() (*Kubeadm, error) {
 // GetJoinToken creates a new bootstrap (join) token, which a node can use to join the cluster.
 func (k *Kubeadm) GetJoinToken(ttl time.Duration) (*kubeadm.BootstrapTokenDiscovery, error) {
-	klog.V(6).Info("[kubeadm] Generating new random bootstrap token")
+	k.log.Infof("Generating new random bootstrap token")
 	rawToken, err := bootstraputil.GenerateBootstrapToken()
 	if err != nil {
 		return nil, fmt.Errorf("couldn't generate random token: %w", err)
@@ -66,13 +68,13 @@ func (k *Kubeadm) GetJoinToken(ttl time.Duration) (*kubeadm.BootstrapTokenDiscov
 	}
 	// create the token in Kubernetes
-	klog.V(6).Info("[kubeadm] Creating bootstrap token in Kubernetes")
+	k.log.Infof("Creating bootstrap token in Kubernetes")
 	if err := tokenphase.CreateNewTokens(k.client, []bootstraptoken.BootstrapToken{token}); err != nil {
 		return nil, fmt.Errorf("creating bootstrap token: %w", err)
 	}
 	// parse Kubernetes CA certs
-	klog.V(6).Info("[kubeadm] Preparing join token for new node")
+	k.log.Infof("Preparing join token for new node")
 	rawConfig, err := k.file.Read(constants.CoreOSAdminConfFilename)
 	if err != nil {
 		return nil, fmt.Errorf("loading kubeconfig file: %w", err)
@@ -94,6 +96,7 @@ func (k *Kubeadm) GetJoinToken(ttl time.Duration) (*kubeadm.BootstrapTokenDiscov
 		publicKeyPins = append(publicKeyPins, pubkeypin.Hash(caCert))
 	}
+	k.log.Infof("Join token creation successful")
 	return &kubeadm.BootstrapTokenDiscovery{
 		Token:             tokenStr.String(),
 		APIServerEndpoint: "10.118.0.1:6443", // This is not HA and should be replaced with the IP of the node issuing the token
@@ -104,13 +107,13 @@ func (k *Kubeadm) GetJoinToken(ttl time.Duration) (*kubeadm.BootstrapTokenDiscov
 // GetControlPlaneCertificateKey uploads Kubernetes encrypted CA certificates to Kubernetes and returns the decryption key.
 // The key can be used by new nodes to join the cluster as a control plane node.
 func (k *Kubeadm) GetControlPlaneCertificateKey() (string, error) {
-	klog.V(6).Info("[kubeadm] Creating new random control plane certificate key")
+	k.log.Infof("Creating new random control plane certificate key")
 	key, err := copycerts.CreateCertificateKey()
 	if err != nil {
 		return "", fmt.Errorf("couldn't create control plane certificate key: %w", err)
 	}
-	klog.V(6).Info("[kubeadm] Uploading certs to Kubernetes")
+	k.log.Infof("Uploading certs to Kubernetes")
 	cfg := &kubeadmapi.InitConfiguration{
 		ClusterConfiguration: kubeadmapi.ClusterConfiguration{
 			CertificatesDir: constants.KubeadmCertificateDir,

View File

@@ -7,6 +7,7 @@ import (
 	"github.com/edgelesssys/constellation/internal/constants"
 	"github.com/edgelesssys/constellation/internal/file"
+	"github.com/edgelesssys/constellation/internal/logger"
 	"github.com/spf13/afero"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -77,6 +78,7 @@ kind: Config`,
 			require := require.New(t)
 			client := &Kubeadm{
+				log:    logger.NewTest(t),
 				file:   file.NewHandler(afero.NewMemMapFs()),
 				client: fake.NewSimpleClientset(),
 			}
@@ -117,6 +119,7 @@ func TestGetControlPlaneCertificateKey(t *testing.T) {
 			assert := assert.New(t)
 			client := &Kubeadm{
+				log:    logger.NewTest(t),
 				client: tc.client,
 			}

View File

@@ -12,7 +12,7 @@ import (
 	"github.com/edgelesssys/constellation/coordinator/util"
 	"github.com/edgelesssys/constellation/internal/file"
-	"k8s.io/klog/v2"
+	"github.com/edgelesssys/constellation/internal/logger"
 )
 const (
@@ -22,19 +22,21 @@ const (
 // KubernetesCA handles signing of certificates using the Kubernetes root CA.
 type KubernetesCA struct {
+	log  *logger.Logger
 	file file.Handler
 }
 // New creates a new KubernetesCA.
-func New(fileHandler file.Handler) *KubernetesCA {
+func New(log *logger.Logger, fileHandler file.Handler) *KubernetesCA {
 	return &KubernetesCA{
+		log:  log,
 		file: fileHandler,
 	}
 }
 // GetCertificate creates a certificate for a node and signs it using the Kubernetes root CA.
 func (c KubernetesCA) GetCertificate(nodeName string) (cert []byte, key []byte, err error) {
-	klog.V(6).Info("CA: loading Kubernetes CA certificate")
+	c.log.Debugf("Loading Kubernetes CA certificate")
 	parentCertRaw, err := c.file.Read(caCertFilename)
 	if err != nil {
 		return nil, nil, err
@@ -45,7 +47,7 @@ func (c KubernetesCA) GetCertificate(nodeName string) (cert []byte, key []byte,
 		return nil, nil, err
 	}
-	klog.V(6).Info("CA: loading Kubernetes CA private key")
+	c.log.Debugf("Loading Kubernetes CA private key")
 	parentKeyRaw, err := c.file.Read(caKeyFilename)
 	if err != nil {
 		return nil, nil, err
@@ -66,7 +68,7 @@ func (c KubernetesCA) GetCertificate(nodeName string) (cert []byte, key []byte,
 		return nil, nil, err
 	}
-	klog.V(6).Info("CA: creating kubelet private key")
+	c.log.Infof("Creating kubelet private key")
 	privK, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
 	if err != nil {
 		return nil, nil, err
@@ -80,7 +82,7 @@ func (c KubernetesCA) GetCertificate(nodeName string) (cert []byte, key []byte,
 		Bytes: keyBytes,
 	})
-	klog.V(6).Info("CA: creating kubelet certificate")
+	c.log.Infof("Creating kubelet certificate")
 	serialNumber, err := util.GenerateCertificateSerialNumber()
 	if err != nil {
 		return nil, nil, err

View File

@@ -14,6 +14,7 @@ import (
 	"time"
 	"github.com/edgelesssys/constellation/internal/file"
+	"github.com/edgelesssys/constellation/internal/logger"
 	"github.com/spf13/afero"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -89,7 +90,10 @@ Q29uc3RlbGxhdGlvbg==
 				require.NoError(file.Write(caKeyFilename, tc.caKey, 0o644))
 			}
-			ca := New(file)
+			ca := New(
+				logger.NewTest(t),
+				file,
+			)
 			nodeName := "test"
 			kubeCert, kubeKey, err := ca.GetCertificate(nodeName)

View File

@@ -11,18 +11,20 @@ import (
 	attestationtypes "github.com/edgelesssys/constellation/internal/attestation/types"
 	"github.com/edgelesssys/constellation/internal/constants"
 	"github.com/edgelesssys/constellation/internal/file"
-	"github.com/edgelesssys/constellation/internal/grpc/grpc_klog"
+	"github.com/edgelesssys/constellation/internal/grpc/grpclog"
+	"github.com/edgelesssys/constellation/internal/logger"
+	"go.uber.org/zap"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/status"
-	"k8s.io/klog/v2"
 	kubeadmv1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
 )
 // Server implements the core logic of Constellation's node activation service.
 type Server struct {
+	log             *logger.Logger
 	file            file.Handler
 	joinTokenGetter joinTokenGetter
 	dataKeyGetter   dataKeyGetter
@@ -31,8 +33,9 @@ type Server struct {
 }
 // New initializes a new Server.
-func New(fileHandler file.Handler, ca certificateAuthority, joinTokenGetter joinTokenGetter, dataKeyGetter dataKeyGetter) *Server {
+func New(log *logger.Logger, fileHandler file.Handler, ca certificateAuthority, joinTokenGetter joinTokenGetter, dataKeyGetter dataKeyGetter) *Server {
 	return &Server{
+		log:             log,
 		file:            fileHandler,
 		joinTokenGetter: joinTokenGetter,
 		dataKeyGetter:   dataKeyGetter,
@@ -42,9 +45,10 @@ func New(fileHandler file.Handler, ca certificateAuthority, joinTokenGetter join
 // Run starts the gRPC server on the given port, using the provided tlsConfig.
 func (s *Server) Run(creds credentials.TransportCredentials, port string) error {
+	s.log.WithIncreasedLevel(zap.WarnLevel).Named("gRPC").ReplaceGRPCLogger()
 	grpcServer := grpc.NewServer(
 		grpc.Creds(creds),
-		grpc.UnaryInterceptor(grpc_klog.LogGRPC(2)),
+		s.log.Named("gRPC").GetServerUnaryInterceptor(),
 	)
 	proto.RegisterAPIServer(grpcServer, s)
@@ -53,7 +57,7 @@ func (s *Server) Run(creds credentials.TransportCredentials, port string) error
 	if err != nil {
 		return fmt.Errorf("failed to listen: %s", err)
 	}
-	klog.V(2).Infof("starting activation service on %s", lis.Addr().String())
+	s.log.Infof("Starting activation service on %s", lis.Addr().String())
 	return grpcServer.Serve(lis)
 }
@@ -63,12 +67,13 @@ func (s *Server) Run(creds credentials.TransportCredentials, port string) error
 // - Kubernetes join token.
 // - cluster and owner ID to taint the node as initialized.
 func (s *Server) ActivateWorkerNode(ctx context.Context, req *proto.ActivateWorkerNodeRequest) (*proto.ActivateWorkerNodeResponse, error) {
+	s.log.Infof("ActivateWorkerNode called")
-	nodeParameters, err := s.activateNode(ctx, "ActivateWorker", req.DiskUuid, req.NodeName)
+	nodeParameters, err := s.activateNode(ctx, req.DiskUuid, req.NodeName)
 	if err != nil {
-		return nil, fmt.Errorf("ActivateNode failed: %w", err)
+		return nil, fmt.Errorf("ActivateWorkerNode failed: %w", err)
 	}
-	klog.V(4).Info("ActivateNode successful")
+	s.log.Infof("ActivateWorkerNode successful")
 	return &proto.ActivateWorkerNodeResponse{
 		StateDiskKey: nodeParameters.stateDiskKey,
@@ -89,9 +94,10 @@ func (s *Server) ActivateWorkerNode(ctx context.Context, req *proto.ActivateWork
 // - cluster and owner ID to taint the node as initialized.
 // - a decryption key for CA certificates uploaded to the Kubernetes cluster.
 func (s *Server) ActivateControlPlaneNode(ctx context.Context, req *proto.ActivateControlPlaneNodeRequest) (*proto.ActivateControlPlaneNodeResponse, error) {
+	s.log.Infof("ActivateControlPlaneNode called")
-	nodeParameters, err := s.activateNode(ctx, "ActivateControlPlane", req.DiskUuid, req.NodeName)
+	nodeParameters, err := s.activateNode(ctx, req.DiskUuid, req.NodeName)
 	if err != nil {
-		return nil, fmt.Errorf("ActivateControlPlane failed: %w", err)
+		return nil, fmt.Errorf("ActivateControlPlaneNode failed: %w", err)
 	}
 	certKey, err := s.joinTokenGetter.GetControlPlaneCertificateKey()
@@ -99,7 +105,7 @@ func (s *Server) ActivateControlPlaneNode(ctx context.Context, req *proto.Activa
 		return nil, fmt.Errorf("ActivateControlPlane failed: %w", err)
 	}
-	klog.V(4).Info("ActivateControlPlane successful")
+	s.log.Infof("ActivateControlPlaneNode successful")
 	return &proto.ActivateControlPlaneNodeResponse{
 		StateDiskKey: nodeParameters.stateDiskKey,
@@ -114,29 +120,30 @@ func (s *Server) ActivateControlPlaneNode(ctx context.Context, req *proto.Activa
 	}, nil
 }
-func (s *Server) activateNode(ctx context.Context, logPrefix, diskUUID, nodeName string) (nodeParameters, error) {
-	klog.V(4).Infof("%s: loading IDs", logPrefix)
+func (s *Server) activateNode(ctx context.Context, diskUUID, nodeName string) (nodeParameters, error) {
+	log := s.log.With(zap.String("peerAddress", grpclog.PeerAddrFromContext(ctx)))
+	log.Infof("Loading IDs")
 	var id attestationtypes.ID
 	if err := s.file.ReadJSON(filepath.Join(constants.ActivationBasePath, constants.ActivationIDFilename), &id); err != nil {
-		klog.Errorf("unable to load IDs: %s", err)
+		log.With(zap.Error(err)).Errorf("Unable to load IDs")
 		return nodeParameters{}, status.Errorf(codes.Internal, "unable to load IDs: %s", err)
 	}
-	klog.V(4).Infof("%s: requesting disk encryption key", logPrefix)
+	log.Infof("Requesting disk encryption key")
 	stateDiskKey, err := s.dataKeyGetter.GetDataKey(ctx, diskUUID, constants.StateDiskKeyLength)
 	if err != nil {
-		klog.Errorf("unable to get key for stateful disk: %s", err)
+		log.With(zap.Error(err)).Errorf("Unable to get key for stateful disk")
 		return nodeParameters{}, status.Errorf(codes.Internal, "unable to get key for stateful disk: %s", err)
 	}
-	klog.V(4).Infof("%s: creating Kubernetes join token", logPrefix)
+	log.Infof("Creating Kubernetes join token")
 	kubeArgs, err := s.joinTokenGetter.GetJoinToken(constants.KubernetesJoinTokenTTL)
 	if err != nil {
-		klog.Errorf("unable to generate Kubernetes join arguments: %s", err)
+		log.With(zap.Error(err)).Errorf("Unable to generate Kubernetes join arguments")
 		return nodeParameters{}, status.Errorf(codes.Internal, "unable to generate Kubernetes join arguments: %s", err)
 	}
-	klog.V(4).Infof("%s: creating signed kubelet certificate", logPrefix)
+	log.Infof("Creating signed kubelet certificate")
 	kubeletCert, kubeletKey, err := s.ca.GetCertificate(nodeName)
 	if err != nil {
 		return nodeParameters{}, status.Errorf(codes.Internal, "unable to generate kubelet certificate: %s", err)
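Two logger integrations replace `grpc_klog` in `Run` above: `ReplaceGRPCLogger` routes gRPC's own internal logging through the service logger (raised to warn level so routine gRPC chatter is suppressed), and `GetServerUnaryInterceptor` logs each incoming RPC. A condensed sketch of that wiring, using only calls visible in this diff; service registration is omitted and the `internal/logger` signatures are inferred from usage:

package main

import (
	"net"

	"github.com/edgelesssys/constellation/internal/logger"
	"go.uber.org/zap"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

// serve wires the logger into a gRPC server the same way Server.Run does.
func serve(log *logger.Logger, creds credentials.TransportCredentials, lis net.Listener) error {
	// Route gRPC-internal logs through our logger, warn level and above only.
	log.WithIncreasedLevel(zap.WarnLevel).Named("gRPC").ReplaceGRPCLogger()

	// Every unary RPC is logged by the interceptor of a named child logger.
	grpcServer := grpc.NewServer(
		grpc.Creds(creds),
		log.Named("gRPC").GetServerUnaryInterceptor(),
	)
	log.Infof("Starting server on %s", lis.Addr().String())
	return grpcServer.Serve(lis)
}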

View File

@@ -12,6 +12,7 @@ import (
 	attestationtypes "github.com/edgelesssys/constellation/internal/attestation/types"
 	"github.com/edgelesssys/constellation/internal/constants"
 	"github.com/edgelesssys/constellation/internal/file"
+	"github.com/edgelesssys/constellation/internal/logger"
 	"github.com/spf13/afero"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -131,9 +132,15 @@ func TestActivateNode(t *testing.T) {
 			if len(tc.id) > 0 {
 				require.NoError(file.Write(filepath.Join(constants.ActivationBasePath, constants.ActivationIDFilename), tc.id, 0o644))
 			}
-			api := New(file, tc.ca, tc.kubeadm, tc.kms)
+			api := New(
+				logger.NewTest(t),
+				file,
+				tc.ca,
+				tc.kubeadm,
+				tc.kms,
+			)
-			resp, err := api.activateNode(context.Background(), "test", "uuid", "test")
+			resp, err := api.activateNode(context.Background(), "uuid", "test")
 			if tc.wantErr {
 				assert.Error(err)
 				return
@@ -212,7 +219,13 @@ func TestActivateWorkerNode(t *testing.T) {
 			file := file.NewHandler(afero.NewMemMapFs())
 			require.NoError(file.Write(filepath.Join(constants.ActivationBasePath, constants.ActivationIDFilename), tc.id, 0o644))
-			api := New(file, tc.ca, tc.kubeadm, tc.kms)
+			api := New(
+				logger.NewTest(t),
+				file,
+				tc.ca,
+				tc.kubeadm,
+				tc.kms,
+			)
 			resp, err := api.ActivateWorkerNode(context.Background(), &activationproto.ActivateWorkerNodeRequest{DiskUuid: "uuid", NodeName: "test"})
 			if tc.wantErr {
@@ -311,7 +324,13 @@ func TestActivateControlPlaneNode(t *testing.T) {
 			file := file.NewHandler(afero.NewMemMapFs())
 			require.NoError(file.Write(filepath.Join(constants.ActivationBasePath, constants.ActivationIDFilename), tc.id, 0o644))
-			api := New(file, tc.ca, tc.kubeadm, tc.kms)
+			api := New(
+				logger.NewTest(t),
+				file,
+				tc.ca,
+				tc.kubeadm,
+				tc.kms,
+			)
 			resp, err := api.ActivateControlPlaneNode(context.Background(), &activationproto.ActivateControlPlaneNodeRequest{DiskUuid: "uuid", NodeName: "test"})
 			if tc.wantErr {

View File

@@ -13,11 +13,12 @@ import (
 	"github.com/edgelesssys/constellation/internal/cloud/cloudprovider"
 	"github.com/edgelesssys/constellation/internal/constants"
 	"github.com/edgelesssys/constellation/internal/file"
-	"k8s.io/klog/v2"
+	"github.com/edgelesssys/constellation/internal/logger"
 )
 // Updatable implements an updatable atls.Validator.
 type Updatable struct {
+	log          *logger.Logger
 	mux          sync.Mutex
 	newValidator newValidatorFunc
 	fileHandler  file.Handler
@@ -25,7 +26,7 @@ type Updatable struct {
 }
 // New initializes a new updatable validator.
-func New(csp string, fileHandler file.Handler) (*Updatable, error) {
+func New(log *logger.Logger, csp string, fileHandler file.Handler) (*Updatable, error) {
 	var newValidator newValidatorFunc
 	switch cloudprovider.FromString(csp) {
 	case cloudprovider.Azure:
@@ -39,6 +40,7 @@ func New(csp string, fileHandler file.Handler) (*Updatable, error) {
 	}
 	u := &Updatable{
+		log:          log,
 		newValidator: newValidator,
 		fileHandler:  fileHandler,
 	}
@@ -66,13 +68,13 @@ func (u *Updatable) Update() error {
 	u.mux.Lock()
 	defer u.mux.Unlock()
-	klog.V(4).Info("Updating expected measurements")
+	u.log.Infof("Updating expected measurements")
 	var measurements map[uint32][]byte
 	if err := u.fileHandler.ReadJSON(filepath.Join(constants.ActivationBasePath, constants.ActivationMeasurementsFilename), &measurements); err != nil {
 		return err
 	}
-	klog.V(6).Infof("New measurements: %v", measurements)
+	u.log.Debugf("New measurements: %v", measurements)
 	u.Validator = u.newValidator(measurements)

View File

@@ -16,6 +16,7 @@ import (
 	"github.com/edgelesssys/constellation/internal/atls"
 	"github.com/edgelesssys/constellation/internal/constants"
 	"github.com/edgelesssys/constellation/internal/file"
+	"github.com/edgelesssys/constellation/internal/logger"
 	"github.com/spf13/afero"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -67,7 +68,11 @@ func TestNewUpdateableValidator(t *testing.T) {
 				))
 			}
-			_, err := New(tc.provider, handler)
+			_, err := New(
+				logger.NewTest(t),
+				tc.provider,
+				handler,
+			)
 			if tc.wantErr {
 				assert.Error(err)
 			} else {
@@ -88,7 +93,11 @@ func TestUpdate(t *testing.T) {
 	handler := file.NewHandler(afero.NewMemMapFs())
 	// create server
-	validator := &Updatable{newValidator: newValidator, fileHandler: handler}
+	validator := &Updatable{
+		log:          logger.NewTest(t),
+		newValidator: newValidator,
+		fileHandler:  handler,
+	}
 	// Update should fail if the file does not exist
 	assert.Error(validator.Update())
@@ -139,6 +148,7 @@ func TestUpdateConcurrency(t *testing.T) {
 	handler := file.NewHandler(afero.NewMemMapFs())
 	validator := &Updatable{
+		log:         logger.NewTest(t),
 		fileHandler: handler,
 		newValidator: func(m map[uint32][]byte) atls.Validator {
 			return fakeValidator{fakeOID: fakeOID{1, 3, 9900, 1}}

View File

@ -3,25 +3,28 @@ package watcher
import ( import (
"fmt" "fmt"
"github.com/edgelesssys/constellation/internal/logger"
"github.com/fsnotify/fsnotify" "github.com/fsnotify/fsnotify"
"k8s.io/klog/v2" "go.uber.org/zap"
) )
// FileWatcher watches for changes to the file and calls the updater's Update method. // FileWatcher watches for changes to the file and calls the updater's Update method.
type FileWatcher struct { type FileWatcher struct {
log *logger.Logger
updater updater updater updater
watcher eventWatcher watcher eventWatcher
done chan struct{} done chan struct{}
} }
// New creates a new FileWatcher for the given validator. // New creates a new FileWatcher for the given validator.
func New(updater updater) (*FileWatcher, error) { func New(log *logger.Logger, updater updater) (*FileWatcher, error) {
watcher, err := fsnotify.NewWatcher() watcher, err := fsnotify.NewWatcher()
if err != nil { if err != nil {
return nil, err return nil, err
} }
return &FileWatcher{ return &FileWatcher{
log: log,
watcher: &fsnotifyWatcher{watcher}, watcher: &fsnotifyWatcher{watcher},
updater: updater, updater: updater,
done: make(chan struct{}, 1), done: make(chan struct{}, 1),
@ -39,6 +42,7 @@ func (f *FileWatcher) Close() error {
// Watch starts watching the file at the given path. // Watch starts watching the file at the given path.
// It will call the watcher's Update method when the file is modified. // It will call the watcher's Update method when the file is modified.
func (f *FileWatcher) Watch(file string) error { func (f *FileWatcher) Watch(file string) error {
log := f.log.With("file", file)
defer func() { f.done <- struct{}{} }() defer func() { f.done <- struct{}{} }()
if err := f.watcher.Add(file); err != nil { if err := f.watcher.Add(file); err != nil {
return err return err
@ -48,28 +52,28 @@ func (f *FileWatcher) Watch(file string) error {
select { select {
case event, ok := <-f.watcher.Events(): case event, ok := <-f.watcher.Events():
if !ok { if !ok {
klog.V(4).Infof("watcher closed") log.Infof("Watcher closed")
return nil return nil
} }
// file changes may be indicated by either a WRITE, CHMOD, CREATE or RENAME event // file changes may be indicated by either a WRITE, CHMOD, CREATE or RENAME event
if event.Op&(fsnotify.Write|fsnotify.Chmod|fsnotify.Create|fsnotify.Rename) != 0 { if event.Op&(fsnotify.Write|fsnotify.Chmod|fsnotify.Create|fsnotify.Rename) != 0 {
if err := f.updater.Update(); err != nil { if err := f.updater.Update(); err != nil {
klog.Errorf("failed to update activation validator: %s", err) log.With(zap.Error(err)).Errorf("Failed to update activation validator")
} }
} }
// if a file gets removed, e.g. by a rename event, we need to re-add the file to the watcher // if a file gets removed, e.g. by a rename event, we need to re-add the file to the watcher
if event.Op&fsnotify.Remove == fsnotify.Remove { if event.Op&fsnotify.Remove == fsnotify.Remove {
if err := f.watcher.Add(event.Name); err != nil { if err := f.watcher.Add(event.Name); err != nil {
klog.Errorf("failed to re-add file %q to watcher: %s", event.Name, err) log.With(zap.Error(err)).Errorf("Failed to re-add file to watcher")
return fmt.Errorf("failed to re-add file %q to watcher: %w", event.Name, err) return fmt.Errorf("failed to re-add file %q to watcher: %w", event.Name, err)
} }
} }
case err := <-f.watcher.Errors(): case err := <-f.watcher.Errors():
if err != nil { if err != nil {
klog.Errorf("watching for measurements updates: %s", err) log.With(zap.Error(err)).Errorf("Watching for measurements updates")
return fmt.Errorf("watching for measurements updates: %w", err) return fmt.Errorf("watching for measurements updates: %w", err)
} }
} }
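
A minimal sketch of driving the logger-injected FileWatcher with a stub updater; noopUpdater, watchExample, and the file path are hypothetical, while New, Watch, and Close come from the hunks above.

// noopUpdater satisfies the package's updater interface for illustration only.
type noopUpdater struct{ log *logger.Logger }

func (u noopUpdater) Update() error {
	u.log.Infof("Watched file changed")
	return nil
}

// watchExample blocks in Watch until the watcher is closed or fails.
func watchExample(log *logger.Logger) error {
	w, err := New(log.Named("fileWatcher"), noopUpdater{log: log})
	if err != nil {
		return err
	}
	defer w.Close()
	// Every write/chmod/create/rename event triggers the updater's Update.
	return w.Watch("/path/to/measurements.json")
}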

View File

@ -6,6 +6,7 @@ import (
"testing" "testing"
"time" "time"
"github.com/edgelesssys/constellation/internal/logger"
"github.com/fsnotify/fsnotify" "github.com/fsnotify/fsnotify"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
@ -99,6 +100,7 @@ func TestWatcher(t *testing.T) {
assert := assert.New(t) assert := assert.New(t)
watcher := &FileWatcher{ watcher := &FileWatcher{
log: logger.NewTest(t),
updater: tc.updater, updater: tc.updater,
watcher: tc.watcher, watcher: tc.watcher,
done: make(chan struct{}, 1), done: make(chan struct{}, 1),

View File

@ -4,11 +4,13 @@ import (
"context" "context"
"github.com/edgelesssys/constellation/internal/deploy/ssh" "github.com/edgelesssys/constellation/internal/deploy/ssh"
"github.com/edgelesssys/constellation/internal/logger"
"go.uber.org/zap/zapcore"
) )
// CreateSSHUsers creates UNIX users with their respective SSH access on the system the coordinator is running on, if defined in the config. // CreateSSHUsers creates UNIX users with their respective SSH access on the system the coordinator is running on, if defined in the config.
func (c *Core) CreateSSHUsers(sshUserKeys []ssh.UserKey) error { func (c *Core) CreateSSHUsers(sshUserKeys []ssh.UserKey) error {
sshAccess := ssh.NewAccess(c.linuxUserManager) sshAccess := ssh.NewAccess(logger.New(logger.JSONLog, zapcore.InfoLevel), c.linuxUserManager)
ctx := context.Background() ctx := context.Background()
for _, pair := range sshUserKeys { for _, pair := range sshUserKeys {

View File

@ -12,7 +12,9 @@ import (
attestationtypes "github.com/edgelesssys/constellation/internal/attestation/types" attestationtypes "github.com/edgelesssys/constellation/internal/attestation/types"
"github.com/edgelesssys/constellation/internal/deploy/ssh" "github.com/edgelesssys/constellation/internal/deploy/ssh"
"github.com/edgelesssys/constellation/internal/deploy/user" "github.com/edgelesssys/constellation/internal/deploy/user"
"github.com/edgelesssys/constellation/internal/logger"
kms "github.com/edgelesssys/constellation/kms/server/setup" kms "github.com/edgelesssys/constellation/kms/server/setup"
"go.uber.org/zap/zapcore"
kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3" kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
) )
@ -163,7 +165,7 @@ func (c *fakeCore) UpdateDiskPassphrase(passphrase string) error {
} }
func (c *fakeCore) CreateSSHUsers(sshUserKeys []ssh.UserKey) error { func (c *fakeCore) CreateSSHUsers(sshUserKeys []ssh.UserKey) error {
sshAccess := ssh.NewAccess(c.linuxUserManager) sshAccess := ssh.NewAccess(logger.New(logger.PlainLog, zapcore.DebugLevel), c.linuxUserManager)
ctx := context.Background() ctx := context.Background()
for _, pair := range sshUserKeys { for _, pair := range sshUserKeys {

View File

@ -1,7 +1,6 @@
package main package main
import ( import (
"log"
"net" "net"
"os" "os"
"strings" "strings"
@ -15,22 +14,25 @@ import (
"github.com/edgelesssys/constellation/debugd/debugd/server" "github.com/edgelesssys/constellation/debugd/debugd/server"
"github.com/edgelesssys/constellation/internal/deploy/ssh" "github.com/edgelesssys/constellation/internal/deploy/ssh"
"github.com/edgelesssys/constellation/internal/deploy/user" "github.com/edgelesssys/constellation/internal/deploy/user"
"github.com/edgelesssys/constellation/internal/logger"
"github.com/spf13/afero" "github.com/spf13/afero"
"go.uber.org/zap/zapcore"
"golang.org/x/net/context" "golang.org/x/net/context"
) )
func main() { func main() {
wg := &sync.WaitGroup{} wg := &sync.WaitGroup{}
log := logger.New(logger.JSONLog, zapcore.InfoLevel)
fs := afero.NewOsFs() fs := afero.NewOsFs()
streamer := coordinator.NewFileStreamer(fs) streamer := coordinator.NewFileStreamer(fs)
serviceManager := deploy.NewServiceManager() serviceManager := deploy.NewServiceManager(log.Named("serviceManager"))
ssh := ssh.NewAccess(user.NewLinuxUserManager(fs)) ssh := ssh.NewAccess(log, user.NewLinuxUserManager(fs))
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel() defer cancel()
download := deploy.New(&net.Dialer{}, serviceManager, streamer) download := deploy.New(log.Named("download"), &net.Dialer{}, serviceManager, streamer)
var fetcher metadata.Fetcher var fetcher metadata.Fetcher
constellationCSP := strings.ToLower(os.Getenv("CONSTEL_CSP")) constellationCSP := strings.ToLower(os.Getenv("CONSTEL_CSP"))
switch constellationCSP { switch constellationCSP {
@ -47,11 +49,11 @@ func main() {
} }
fetcher = gcpFetcher fetcher = gcpFetcher
default: default:
log.Printf("Unknown / unimplemented cloud provider CONSTEL_CSP=%v\n", constellationCSP) log.Errorf("Unknown / unimplemented cloud provider CONSTEL_CSP=%v. Using fallback", constellationCSP)
fetcher = fallback.Fetcher{} fetcher = fallback.Fetcher{}
} }
sched := metadata.NewScheduler(fetcher, ssh, download) sched := metadata.NewScheduler(log.Named("scheduler"), fetcher, ssh, download)
serv := server.New(ssh, serviceManager, streamer) serv := server.New(log.Named("server"), ssh, serviceManager, streamer)
if err := deploy.DeployDefaultServiceUnit(ctx, serviceManager); err != nil { if err := deploy.DeployDefaultServiceUnit(ctx, serviceManager); err != nil {
panic(err) panic(err)
} }
@ -59,7 +61,7 @@ func main() {
wg.Add(1) wg.Add(1)
go sched.Start(ctx, wg) go sched.Start(ctx, wg)
wg.Add(1) wg.Add(1)
go server.Start(wg, serv) go server.Start(log, wg, serv)
wg.Wait() wg.Wait()
} }

View File

@ -3,19 +3,21 @@ package deploy
import ( import (
"context" "context"
"fmt" "fmt"
"log"
"net" "net"
"time" "time"
"github.com/edgelesssys/constellation/debugd/coordinator" "github.com/edgelesssys/constellation/debugd/coordinator"
"github.com/edgelesssys/constellation/debugd/debugd" "github.com/edgelesssys/constellation/debugd/debugd"
pb "github.com/edgelesssys/constellation/debugd/service" pb "github.com/edgelesssys/constellation/debugd/service"
"github.com/edgelesssys/constellation/internal/logger"
"go.uber.org/zap"
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/credentials/insecure"
) )
// Download downloads a coordinator from a given debugd instance. // Download downloads a coordinator from a given debugd instance.
type Download struct { type Download struct {
log *logger.Logger
dialer NetDialer dialer NetDialer
writer streamToFileWriter writer streamToFileWriter
serviceManager serviceManager serviceManager serviceManager
@ -23,8 +25,9 @@ type Download struct {
} }
// New creates a new Download. // New creates a new Download.
func New(dialer NetDialer, serviceManager serviceManager, writer streamToFileWriter) *Download { func New(log *logger.Logger, dialer NetDialer, serviceManager serviceManager, writer streamToFileWriter) *Download {
return &Download{ return &Download{
log: log,
dialer: dialer, dialer: dialer,
writer: writer, writer: writer,
serviceManager: serviceManager, serviceManager: serviceManager,
@ -34,12 +37,13 @@ func New(dialer NetDialer, serviceManager serviceManager, writer streamToFileWri
// DownloadCoordinator will open a new grpc connection to another instance, attempting to download a coordinator from that instance. // DownloadCoordinator will open a new grpc connection to another instance, attempting to download a coordinator from that instance.
func (d *Download) DownloadCoordinator(ctx context.Context, ip string) error { func (d *Download) DownloadCoordinator(ctx context.Context, ip string) error {
log := d.log.With(zap.String("ip", ip))
serverAddr := net.JoinHostPort(ip, debugd.DebugdPort) serverAddr := net.JoinHostPort(ip, debugd.DebugdPort)
// only retry download from same endpoint after backoff // only retry download from same endpoint after backoff
if lastAttempt, ok := d.attemptedDownloads[serverAddr]; ok && time.Since(lastAttempt) < debugd.CoordinatorDownloadRetryBackoff { if lastAttempt, ok := d.attemptedDownloads[serverAddr]; ok && time.Since(lastAttempt) < debugd.CoordinatorDownloadRetryBackoff {
return fmt.Errorf("download failed too recently: %v / %v", time.Since(lastAttempt), debugd.CoordinatorDownloadRetryBackoff) return fmt.Errorf("download failed too recently: %v / %v", time.Since(lastAttempt), debugd.CoordinatorDownloadRetryBackoff)
} }
log.Printf("Trying to download coordinator from %s\n", ip) log.Infof("Trying to download coordinator")
d.attemptedDownloads[serverAddr] = time.Now() d.attemptedDownloads[serverAddr] = time.Now()
conn, err := d.dial(ctx, serverAddr) conn, err := d.dial(ctx, serverAddr)
if err != nil { if err != nil {
@ -56,7 +60,7 @@ func (d *Download) DownloadCoordinator(ctx context.Context, ip string) error {
return fmt.Errorf("streaming coordinator from other instance: %w", err) return fmt.Errorf("streaming coordinator from other instance: %w", err)
} }
log.Printf("Successfully downloaded coordinator from %s\n", ip) log.Infof("Successfully downloaded coordinator")
// after the upload succeeds, try to restart the coordinator // after the upload succeeds, try to restart the coordinator
restartAction := ServiceManagerRequest{ restartAction := ServiceManagerRequest{

View File

@ -13,6 +13,7 @@ import (
"github.com/edgelesssys/constellation/debugd/debugd" "github.com/edgelesssys/constellation/debugd/debugd"
pb "github.com/edgelesssys/constellation/debugd/service" pb "github.com/edgelesssys/constellation/debugd/service"
"github.com/edgelesssys/constellation/internal/grpc/testdialer" "github.com/edgelesssys/constellation/internal/grpc/testdialer"
"github.com/edgelesssys/constellation/internal/logger"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/protobuf/proto" "google.golang.org/protobuf/proto"
@ -93,6 +94,7 @@ func TestDownloadCoordinator(t *testing.T) {
go grpcServ.Serve(lis) go grpcServ.Serve(lis)
download := &Download{ download := &Download{
log: logger.NewTest(t),
dialer: dialer, dialer: dialer,
writer: writer, writer: writer,
serviceManager: &tc.serviceManager, serviceManager: &tc.serviceManager,

View File

@ -2,13 +2,13 @@ package deploy
import ( import (
"context" "context"
"errors"
"fmt" "fmt"
"log"
"sync" "sync"
"github.com/edgelesssys/constellation/debugd/debugd" "github.com/edgelesssys/constellation/debugd/debugd"
"github.com/edgelesssys/constellation/internal/logger"
"github.com/spf13/afero" "github.com/spf13/afero"
"go.uber.org/zap"
) )
const ( const (
@ -40,15 +40,17 @@ type SystemdUnit struct {
// ServiceManager receives ServiceManagerRequests and units via channels and performs the requests / creates the unit files. // ServiceManager receives ServiceManagerRequests and units via channels and performs the requests / creates the unit files.
type ServiceManager struct { type ServiceManager struct {
log *logger.Logger
dbus dbusClient dbus dbusClient
fs afero.Fs fs afero.Fs
systemdUnitFilewriteLock sync.Mutex systemdUnitFilewriteLock sync.Mutex
} }
// NewServiceManager creates a new ServiceManager. // NewServiceManager creates a new ServiceManager.
func NewServiceManager() *ServiceManager { func NewServiceManager(log *logger.Logger) *ServiceManager {
fs := afero.NewOsFs() fs := afero.NewOsFs()
return &ServiceManager{ return &ServiceManager{
log: log,
dbus: &dbusWrapper{}, dbus: &dbusWrapper{},
fs: fs, fs: fs,
systemdUnitFilewriteLock: sync.Mutex{}, systemdUnitFilewriteLock: sync.Mutex{},
@ -78,6 +80,7 @@ type dbusConn interface {
// SystemdAction will perform a systemd action on a service unit (start, stop, restart, reload). // SystemdAction will perform a systemd action on a service unit (start, stop, restart, reload).
func (s *ServiceManager) SystemdAction(ctx context.Context, request ServiceManagerRequest) error { func (s *ServiceManager) SystemdAction(ctx context.Context, request ServiceManagerRequest) error {
log := s.log.With(zap.String("unit", request.Unit), zap.String("action", request.Action.String()))
conn, err := s.dbus.NewSystemdConnectionContext(ctx) conn, err := s.dbus.NewSystemdConnectionContext(ctx)
if err != nil { if err != nil {
return fmt.Errorf("establishing systemd connection: %w", err) return fmt.Errorf("establishing systemd connection: %w", err)
@ -94,14 +97,14 @@ func (s *ServiceManager) SystemdAction(ctx context.Context, request ServiceManag
case Reload: case Reload:
err = conn.ReloadContext(ctx) err = conn.ReloadContext(ctx)
default: default:
return errors.New("unknown systemd action: " + request.Action.String()) return fmt.Errorf("unknown systemd action: %s", request.Action.String())
} }
if err != nil { if err != nil {
return fmt.Errorf("performing systemd action %v on unit %v: %w", request.Action, request.Unit, err) return fmt.Errorf("performing systemd action %v on unit %v: %w", request.Action, request.Unit, err)
} }
if request.Action == Reload { if request.Action == Reload {
log.Println("daemon-reload succeeded") log.Infof("daemon-reload succeeded")
return nil return nil
} }
// Wait for the action to finish and then check if it was // Wait for the action to finish and then check if it was
@ -110,17 +113,18 @@ func (s *ServiceManager) SystemdAction(ctx context.Context, request ServiceManag
switch result { switch result {
case "done": case "done":
log.Printf("%s on systemd unit %s succeeded\n", request.Action, request.Unit) log.Infof("%s on systemd unit %s succeeded", request.Action, request.Unit)
return nil return nil
default: default:
return fmt.Errorf("performing action %v on systemd unit \"%v\" failed: expected \"%v\" but received \"%v\"", request.Action, request.Unit, "done", result) return fmt.Errorf("performing action %q on systemd unit %q failed: expected %q but received %q", request.Action.String(), request.Unit, "done", result)
} }
} }
// WriteSystemdUnitFile will write a systemd unit to disk. // WriteSystemdUnitFile will write a systemd unit to disk.
func (s *ServiceManager) WriteSystemdUnitFile(ctx context.Context, unit SystemdUnit) error { func (s *ServiceManager) WriteSystemdUnitFile(ctx context.Context, unit SystemdUnit) error {
log.Printf("Writing systemd unit file: %s/%s\n", systemdUnitFolder, unit.Name) log := s.log.With(zap.String("unitFile", fmt.Sprintf("%s/%s", systemdUnitFolder, unit.Name)))
log.Infof("Writing systemd unit file")
s.systemdUnitFilewriteLock.Lock() s.systemdUnitFilewriteLock.Lock()
defer s.systemdUnitFilewriteLock.Unlock() defer s.systemdUnitFilewriteLock.Unlock()
if err := afero.WriteFile(s.fs, fmt.Sprintf("%s/%s", systemdUnitFolder, unit.Name), []byte(unit.Contents), 0o644); err != nil { if err := afero.WriteFile(s.fs, fmt.Sprintf("%s/%s", systemdUnitFolder, unit.Name), []byte(unit.Contents), 0o644); err != nil {
@ -131,7 +135,7 @@ func (s *ServiceManager) WriteSystemdUnitFile(ctx context.Context, unit SystemdU
return fmt.Errorf("performing systemd daemon-reload: %w", err) return fmt.Errorf("performing systemd daemon-reload: %w", err)
} }
log.Printf("Wrote systemd unit file: %s/%s and performed daemon-reload\n", systemdUnitFolder, unit.Name) log.Infof("Wrote systemd unit file and performed daemon-reload")
return nil return nil
} }
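
A sketch of how a caller might drive the refactored ServiceManager; the unit name is hypothetical, and Restart is assumed to be one of the defined action constants alongside Reload.

package main

import (
	"context"

	"github.com/edgelesssys/constellation/debugd/debugd/deploy"
	"github.com/edgelesssys/constellation/internal/logger"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	log := logger.New(logger.JSONLog, zapcore.InfoLevel)
	manager := deploy.NewServiceManager(log.Named("serviceManager"))

	// The scoped logger inside SystemdAction carries unit and action fields.
	req := deploy.ServiceManagerRequest{
		Unit:   "constellation-coordinator.service", // hypothetical unit
		Action: deploy.Restart,                      // assumed constant, cf. Reload above
	}
	if err := manager.SystemdAction(context.Background(), req); err != nil {
		log.With(zap.Error(err)).Errorf("Systemd action failed")
	}
}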

View File

@ -7,6 +7,7 @@ import (
"sync" "sync"
"testing" "testing"
"github.com/edgelesssys/constellation/internal/logger"
"github.com/spf13/afero" "github.com/spf13/afero"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
@ -95,6 +96,7 @@ func TestSystemdAction(t *testing.T) {
fs := afero.NewMemMapFs() fs := afero.NewMemMapFs()
manager := ServiceManager{ manager := ServiceManager{
log: logger.NewTest(t),
dbus: &tc.dbus, dbus: &tc.dbus,
fs: fs, fs: fs,
systemdUnitFilewriteLock: sync.Mutex{}, systemdUnitFilewriteLock: sync.Mutex{},
@ -173,6 +175,7 @@ func TestWriteSystemdUnitFile(t *testing.T) {
fs = afero.NewReadOnlyFs(fs) fs = afero.NewReadOnlyFs(fs)
} }
manager := ServiceManager{ manager := ServiceManager{
log: logger.NewTest(t),
dbus: &tc.dbus, dbus: &tc.dbus,
fs: fs, fs: fs,
systemdUnitFilewriteLock: sync.Mutex{}, systemdUnitFilewriteLock: sync.Mutex{},

View File

@ -4,12 +4,13 @@ import (
"context" "context"
"errors" "errors"
"io/fs" "io/fs"
"log"
"sync" "sync"
"time" "time"
"github.com/edgelesssys/constellation/debugd/debugd" "github.com/edgelesssys/constellation/debugd/debugd"
"github.com/edgelesssys/constellation/internal/deploy/ssh" "github.com/edgelesssys/constellation/internal/deploy/ssh"
"github.com/edgelesssys/constellation/internal/logger"
"go.uber.org/zap"
) )
// Fetcher retrieves other debugd IPs and SSH keys from cloud provider metadata. // Fetcher retrieves other debugd IPs and SSH keys from cloud provider metadata.
@ -20,14 +21,16 @@ type Fetcher interface {
// Scheduler schedules fetching of metadata using timers. // Scheduler schedules fetching of metadata using timers.
type Scheduler struct { type Scheduler struct {
log *logger.Logger
fetcher Fetcher fetcher Fetcher
ssh sshDeployer ssh sshDeployer
downloader downloader downloader downloader
} }
// NewScheduler returns a new scheduler. // NewScheduler returns a new scheduler.
func NewScheduler(fetcher Fetcher, ssh sshDeployer, downloader downloader) *Scheduler { func NewScheduler(log *logger.Logger, fetcher Fetcher, ssh sshDeployer, downloader downloader) *Scheduler {
return &Scheduler{ return &Scheduler{
log: log,
fetcher: fetcher, fetcher: fetcher,
ssh: ssh, ssh: ssh,
downloader: downloader, downloader: downloader,
@ -49,7 +52,7 @@ func (s *Scheduler) discoveryLoop(ctx context.Context, wg *sync.WaitGroup) {
// execute debugd discovery once at the start to skip wait for first tick // execute debugd discovery once at the start to skip wait for first tick
ips, err := s.fetcher.DiscoverDebugdIPs(ctx) ips, err := s.fetcher.DiscoverDebugdIPs(ctx)
if err != nil { if err != nil {
log.Printf("error occurred while discovering debugd IPs: %v\n", err) s.log.With(zap.Error(err)).Errorf("Discovering debugd IPs failed")
} else { } else {
if s.downloadCoordinator(ctx, ips) { if s.downloadCoordinator(ctx, ips) {
return return
@ -64,10 +67,10 @@ func (s *Scheduler) discoveryLoop(ctx context.Context, wg *sync.WaitGroup) {
case <-ticker.C: case <-ticker.C:
ips, err = s.fetcher.DiscoverDebugdIPs(ctx) ips, err = s.fetcher.DiscoverDebugdIPs(ctx)
if err != nil { if err != nil {
log.Printf("error occurred while discovering debugd IPs: %v\n", err) s.log.With(zap.Error(err)).Errorf("Discovering debugd IPs failed")
continue continue
} }
log.Printf("discovered instances: %v\n", ips) s.log.With(zap.Strings("ips", ips)).Infof("Discovered instances")
if s.downloadCoordinator(ctx, ips) { if s.downloadCoordinator(ctx, ips) {
return return
} }
@ -80,24 +83,19 @@ func (s *Scheduler) discoveryLoop(ctx context.Context, wg *sync.WaitGroup) {
// sshLoop discovers new ssh keys from cloud provider metadata periodically. // sshLoop discovers new ssh keys from cloud provider metadata periodically.
func (s *Scheduler) sshLoop(ctx context.Context, wg *sync.WaitGroup) { func (s *Scheduler) sshLoop(ctx context.Context, wg *sync.WaitGroup) {
defer wg.Done() defer wg.Done()
// execute ssh key search once at the start to skip wait for first tick
keys, err := s.fetcher.FetchSSHKeys(ctx)
if err != nil {
log.Printf("error occurred while fetching SSH keys: %v\n", err)
} else {
s.deploySSHKeys(ctx, keys)
}
ticker := time.NewTicker(debugd.SSHCheckInterval) ticker := time.NewTicker(debugd.SSHCheckInterval)
defer ticker.Stop() defer ticker.Stop()
for { for {
keys, err := s.fetcher.FetchSSHKeys(ctx)
if err != nil {
s.log.With(zap.Error(err)).Errorf("Fetching SSH keys failed")
} else {
s.deploySSHKeys(ctx, keys)
}
select { select {
case <-ticker.C: case <-ticker.C:
keys, err := s.fetcher.FetchSSHKeys(ctx)
if err != nil {
log.Printf("error occurred while fetching ssh keys: %v\n", err)
continue
}
s.deploySSHKeys(ctx, keys)
case <-ctx.Done(): case <-ctx.Done():
return return
} }
@ -116,7 +114,7 @@ func (s *Scheduler) downloadCoordinator(ctx context.Context, ips []string) (succ
// coordinator was already uploaded // coordinator was already uploaded
return true return true
} }
log.Printf("error occurred while downloading coordinator from %v: %v\n", ip, err) s.log.With(zap.Error(err), zap.String("peer", ip)).Errorf("Downloading coordinator from peer failed")
} }
return false return false
} }
@ -126,7 +124,7 @@ func (s *Scheduler) deploySSHKeys(ctx context.Context, keys []ssh.UserKey) {
for _, key := range keys { for _, key := range keys {
err := s.ssh.DeployAuthorizedKey(ctx, key) err := s.ssh.DeployAuthorizedKey(ctx, key)
if err != nil { if err != nil {
log.Printf("error occurred while deploying ssh key %v: %v\n", key, err) s.log.With(zap.Error(err), zap.Any("key", key)).Errorf("Deploying SSH key failed")
continue continue
} }
} }
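
The sshLoop rewrite above replaces a duplicated pre-loop fetch with a loop that does its work at the top of every iteration and then waits for the next tick. A generic, self-contained sketch of that pattern follows; the names and intervals are stand-ins.

package main

import (
	"context"
	"fmt"
	"time"
)

// pollLoop runs work immediately and then once per tick until ctx is done,
// so no separate "run once before the loop" copy of the logic is needed.
func pollLoop(ctx context.Context, interval time.Duration, work func() error) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		if err := work(); err != nil {
			fmt.Println("work failed:", err)
		}
		select {
		case <-ticker.C:
		case <-ctx.Done():
			return
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond)
	defer cancel()
	pollLoop(ctx, 100*time.Millisecond, func() error {
		fmt.Println("fetching SSH keys (stub)")
		return nil
	})
}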

View File

@ -8,6 +8,7 @@ import (
"time" "time"
"github.com/edgelesssys/constellation/internal/deploy/ssh" "github.com/edgelesssys/constellation/internal/deploy/ssh"
"github.com/edgelesssys/constellation/internal/logger"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
@ -74,6 +75,7 @@ func TestSchedulerStart(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), tc.timeout) ctx, cancel := context.WithTimeout(context.Background(), tc.timeout)
defer cancel() defer cancel()
scheduler := Scheduler{ scheduler := Scheduler{
log: logger.NewTest(t),
fetcher: &tc.fetcher, fetcher: &tc.fetcher,
ssh: &tc.ssh, ssh: &tc.ssh,
downloader: &tc.downloader, downloader: &tc.downloader,

View File

@ -5,7 +5,6 @@ import (
"errors" "errors"
"fmt" "fmt"
"io/fs" "io/fs"
"log"
"net" "net"
"sync" "sync"
@ -14,10 +13,13 @@ import (
"github.com/edgelesssys/constellation/debugd/debugd/deploy" "github.com/edgelesssys/constellation/debugd/debugd/deploy"
pb "github.com/edgelesssys/constellation/debugd/service" pb "github.com/edgelesssys/constellation/debugd/service"
"github.com/edgelesssys/constellation/internal/deploy/ssh" "github.com/edgelesssys/constellation/internal/deploy/ssh"
"github.com/edgelesssys/constellation/internal/logger"
"go.uber.org/zap"
"google.golang.org/grpc" "google.golang.org/grpc"
) )
type debugdServer struct { type debugdServer struct {
log *logger.Logger
ssh sshDeployer ssh sshDeployer
serviceManager serviceManager serviceManager serviceManager
streamer streamer streamer streamer
@ -25,8 +27,9 @@ type debugdServer struct {
} }
// New creates a new debugdServer according to the gRPC spec. // New creates a new debugdServer according to the gRPC spec.
func New(ssh sshDeployer, serviceManager serviceManager, streamer streamer) pb.DebugdServer { func New(log *logger.Logger, ssh sshDeployer, serviceManager serviceManager, streamer streamer) pb.DebugdServer {
return &debugdServer{ return &debugdServer{
log: log,
ssh: ssh, ssh: ssh,
serviceManager: serviceManager, serviceManager: serviceManager,
streamer: streamer, streamer: streamer,
@ -35,10 +38,10 @@ func New(ssh sshDeployer, serviceManager serviceManager, streamer streamer) pb.D
// UploadAuthorizedKeys receives a list of authorized keys and forwards them to a channel. // UploadAuthorizedKeys receives a list of authorized keys and forwards them to a channel.
func (s *debugdServer) UploadAuthorizedKeys(ctx context.Context, in *pb.UploadAuthorizedKeysRequest) (*pb.UploadAuthorizedKeysResponse, error) { func (s *debugdServer) UploadAuthorizedKeys(ctx context.Context, in *pb.UploadAuthorizedKeysRequest) (*pb.UploadAuthorizedKeysResponse, error) {
log.Println("Uploading authorized keys") s.log.Infof("Uploading authorized keys")
for _, key := range in.Keys { for _, key := range in.Keys {
if err := s.ssh.DeployAuthorizedKey(ctx, ssh.UserKey{Username: key.Username, PublicKey: key.KeyValue}); err != nil { if err := s.ssh.DeployAuthorizedKey(ctx, ssh.UserKey{Username: key.Username, PublicKey: key.KeyValue}); err != nil {
log.Printf("Uploading authorized keys failed: %v\n", err) s.log.With(zap.Error(err)).Errorf("Uploading authorized keys failed")
return &pb.UploadAuthorizedKeysResponse{ return &pb.UploadAuthorizedKeysResponse{
Status: pb.UploadAuthorizedKeysStatus_UPLOAD_AUTHORIZED_KEYS_FAILURE, Status: pb.UploadAuthorizedKeysStatus_UPLOAD_AUTHORIZED_KEYS_FAILURE,
}, nil }, nil
@ -58,7 +61,7 @@ func (s *debugdServer) UploadCoordinator(stream pb.Debugd_UploadCoordinatorServe
var responseStatus pb.UploadCoordinatorStatus var responseStatus pb.UploadCoordinatorStatus
defer func() { defer func() {
if err := s.serviceManager.SystemdAction(stream.Context(), startAction); err != nil { if err := s.serviceManager.SystemdAction(stream.Context(), startAction); err != nil {
log.Printf("Starting uploaded coordinator failed: %v\n", err) s.log.With(zap.Error(err)).Errorf("Starting uploaded coordinator failed")
if responseStatus == pb.UploadCoordinatorStatus_UPLOAD_COORDINATOR_SUCCESS { if responseStatus == pb.UploadCoordinatorStatus_UPLOAD_COORDINATOR_SUCCESS {
responseStatus = pb.UploadCoordinatorStatus_UPLOAD_COORDINATOR_START_FAILED responseStatus = pb.UploadCoordinatorStatus_UPLOAD_COORDINATOR_START_FAILED
} }
@ -67,33 +70,33 @@ func (s *debugdServer) UploadCoordinator(stream pb.Debugd_UploadCoordinatorServe
Status: responseStatus, Status: responseStatus,
}) })
}() }()
log.Println("Starting coordinator upload") s.log.Infof("Starting coordinator upload")
if err := s.streamer.WriteStream(debugd.CoordinatorDeployFilename, stream, true); err != nil { if err := s.streamer.WriteStream(debugd.CoordinatorDeployFilename, stream, true); err != nil {
if errors.Is(err, fs.ErrExist) { if errors.Is(err, fs.ErrExist) {
// coordinator was already uploaded // coordinator was already uploaded
log.Println("Coordinator already uploaded") s.log.Warnf("Coordinator already uploaded")
responseStatus = pb.UploadCoordinatorStatus_UPLOAD_COORDINATOR_FILE_EXISTS responseStatus = pb.UploadCoordinatorStatus_UPLOAD_COORDINATOR_FILE_EXISTS
return nil return nil
} }
log.Printf("Uploading coordinator failed: %v\n", err) s.log.With(zap.Error(err)).Errorf("Uploading coordinator failed")
responseStatus = pb.UploadCoordinatorStatus_UPLOAD_COORDINATOR_UPLOAD_FAILED responseStatus = pb.UploadCoordinatorStatus_UPLOAD_COORDINATOR_UPLOAD_FAILED
return fmt.Errorf("uploading coordinator: %w", err) return fmt.Errorf("uploading coordinator: %w", err)
} }
log.Println("Successfully uploaded coordinator") s.log.Infof("Successfully uploaded coordinator")
responseStatus = pb.UploadCoordinatorStatus_UPLOAD_COORDINATOR_SUCCESS responseStatus = pb.UploadCoordinatorStatus_UPLOAD_COORDINATOR_SUCCESS
return nil return nil
} }
// DownloadCoordinator streams the local coordinator binary to other instances. // DownloadCoordinator streams the local coordinator binary to other instances.
func (s *debugdServer) DownloadCoordinator(request *pb.DownloadCoordinatorRequest, stream pb.Debugd_DownloadCoordinatorServer) error { func (s *debugdServer) DownloadCoordinator(request *pb.DownloadCoordinatorRequest, stream pb.Debugd_DownloadCoordinatorServer) error {
log.Println("Sending coordinator to other instance") s.log.Infof("Sending coordinator to other instance")
return s.streamer.ReadStream(debugd.CoordinatorDeployFilename, stream, debugd.Chunksize, true) return s.streamer.ReadStream(debugd.CoordinatorDeployFilename, stream, debugd.Chunksize, true)
} }
// UploadSystemServiceUnits receives systemd service units, writes them to a service file and schedules a daemon-reload. // UploadSystemServiceUnits receives systemd service units, writes them to a service file and schedules a daemon-reload.
func (s *debugdServer) UploadSystemServiceUnits(ctx context.Context, in *pb.UploadSystemdServiceUnitsRequest) (*pb.UploadSystemdServiceUnitsResponse, error) { func (s *debugdServer) UploadSystemServiceUnits(ctx context.Context, in *pb.UploadSystemdServiceUnitsRequest) (*pb.UploadSystemdServiceUnitsResponse, error) {
log.Println("Uploading systemd service units") s.log.Infof("Uploading systemd service units")
for _, unit := range in.Units { for _, unit := range in.Units {
if err := s.serviceManager.WriteSystemdUnitFile(ctx, deploy.SystemdUnit{Name: unit.Name, Contents: unit.Contents}); err != nil { if err := s.serviceManager.WriteSystemdUnitFile(ctx, deploy.SystemdUnit{Name: unit.Name, Contents: unit.Contents}); err != nil {
return &pb.UploadSystemdServiceUnitsResponse{Status: pb.UploadSystemdServiceUnitsStatus_UPLOAD_SYSTEMD_SERVICE_UNITS_FAILURE}, nil return &pb.UploadSystemdServiceUnitsResponse{Status: pb.UploadSystemdServiceUnitsStatus_UPLOAD_SYSTEMD_SERVICE_UNITS_FAILURE}, nil
@ -104,15 +107,19 @@ func (s *debugdServer) UploadSystemServiceUnits(ctx context.Context, in *pb.Uplo
} }
// Start will start the gRPC server and block. // Start will start the gRPC server and block.
func Start(wg *sync.WaitGroup, serv pb.DebugdServer) { func Start(log *logger.Logger, wg *sync.WaitGroup, serv pb.DebugdServer) {
defer wg.Done() defer wg.Done()
grpcServer := grpc.NewServer()
grpcLog := log.Named("gRPC")
grpcLog.WithIncreasedLevel(zap.WarnLevel).ReplaceGRPCLogger()
grpcServer := grpc.NewServer(grpcLog.GetServerStreamInterceptor(), grpcLog.GetServerUnaryInterceptor())
pb.RegisterDebugdServer(grpcServer, serv) pb.RegisterDebugdServer(grpcServer, serv)
lis, err := net.Listen("tcp", net.JoinHostPort("0.0.0.0", debugd.DebugdPort)) lis, err := net.Listen("tcp", net.JoinHostPort("0.0.0.0", debugd.DebugdPort))
if err != nil { if err != nil {
log.Fatalf("listening failed: %v", err) log.With(zap.Error(err)).Fatalf("Listening failed")
} }
log.Println("gRPC server is waiting for connections") log.Infof("gRPC server is waiting for connections")
grpcServer.Serve(lis) grpcServer.Serve(lis)
} }
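
The Start rewrite bundles three logger features: naming a sub-logger, raising its level for gRPC's internal logs, and attaching the interceptor options. A minimal standalone sketch of the same wiring; the listen address is illustrative.

package main

import (
	"net"

	"github.com/edgelesssys/constellation/internal/logger"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
	"google.golang.org/grpc"
)

func main() {
	log := logger.New(logger.JSONLog, zapcore.InfoLevel)

	// Route grpc-go's internal logging through a named sub-logger, raised to
	// Warn so routine connection chatter is dropped.
	grpcLog := log.Named("gRPC")
	grpcLog.WithIncreasedLevel(zap.WarnLevel).ReplaceGRPCLogger()

	// Log every stream and unary call via the interceptor options.
	grpcServer := grpc.NewServer(
		grpcLog.GetServerStreamInterceptor(),
		grpcLog.GetServerUnaryInterceptor(),
	)

	lis, err := net.Listen("tcp", "0.0.0.0:4000") // address is illustrative
	if err != nil {
		log.With(zap.Error(err)).Fatalf("Listening failed")
	}
	log.Infof("gRPC server is waiting for connections")
	_ = grpcServer.Serve(lis)
}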

View File

@ -13,6 +13,7 @@ import (
pb "github.com/edgelesssys/constellation/debugd/service" pb "github.com/edgelesssys/constellation/debugd/service"
"github.com/edgelesssys/constellation/internal/deploy/ssh" "github.com/edgelesssys/constellation/internal/deploy/ssh"
"github.com/edgelesssys/constellation/internal/grpc/testdialer" "github.com/edgelesssys/constellation/internal/grpc/testdialer"
"github.com/edgelesssys/constellation/internal/logger"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"google.golang.org/grpc" "google.golang.org/grpc"
@ -73,6 +74,7 @@ func TestUploadAuthorizedKeys(t *testing.T) {
require := require.New(t) require := require.New(t)
serv := debugdServer{ serv := debugdServer{
log: logger.NewTest(t),
ssh: &tc.ssh, ssh: &tc.ssh,
serviceManager: &tc.serviceManager, serviceManager: &tc.serviceManager,
streamer: &fakeStreamer{}, streamer: &fakeStreamer{},
@ -148,6 +150,7 @@ func TestUploadCoordinator(t *testing.T) {
require := require.New(t) require := require.New(t)
serv := debugdServer{ serv := debugdServer{
log: logger.NewTest(t),
ssh: &tc.ssh, ssh: &tc.ssh,
serviceManager: &tc.serviceManager, serviceManager: &tc.serviceManager,
streamer: &tc.streamer, streamer: &tc.streamer,
@ -218,6 +221,7 @@ func TestDownloadCoordinator(t *testing.T) {
require := require.New(t) require := require.New(t)
serv := debugdServer{ serv := debugdServer{
log: logger.NewTest(t),
ssh: &tc.ssh, ssh: &tc.ssh,
serviceManager: &tc.serviceManager, serviceManager: &tc.serviceManager,
streamer: &tc.streamer, streamer: &tc.streamer,
@ -298,6 +302,7 @@ func TestUploadSystemServiceUnits(t *testing.T) {
require := require.New(t) require := require.New(t)
serv := debugdServer{ serv := debugdServer{
log: logger.NewTest(t),
ssh: &tc.ssh, ssh: &tc.ssh,
serviceManager: &tc.serviceManager, serviceManager: &tc.serviceManager,
streamer: &fakeStreamer{}, streamer: &fakeStreamer{},

View File

@ -96,6 +96,7 @@ require (
github.com/aws/aws-sdk-go-v2/service/sso v1.11.2 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.11.2 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.16.2 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.16.2 // indirect
github.com/aws/smithy-go v1.11.2 // indirect github.com/aws/smithy-go v1.11.2 // indirect
github.com/benbjohnson/clock v1.3.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect
github.com/dimchansky/utfbom v1.1.1 // indirect github.com/dimchansky/utfbom v1.1.1 // indirect
@ -114,6 +115,7 @@ require (
github.com/google/tink/go v1.6.1 // indirect github.com/google/tink/go v1.6.1 // indirect
github.com/google/uuid v1.3.0 // indirect github.com/google/uuid v1.3.0 // indirect
github.com/googleapis/gax-go/v2 v2.2.0 // indirect github.com/googleapis/gax-go/v2 v2.2.0 // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/native v1.0.0 // indirect github.com/josharian/native v1.0.0 // indirect
@ -136,6 +138,7 @@ require (
go.opencensus.io v0.23.0 // indirect go.opencensus.io v0.23.0 // indirect
go.uber.org/atomic v1.9.0 // indirect go.uber.org/atomic v1.9.0 // indirect
go.uber.org/multierr v1.8.0 // indirect go.uber.org/multierr v1.8.0 // indirect
go.uber.org/zap v1.21.0 // indirect
golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd // indirect golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd // indirect
golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect
golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a // indirect golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a // indirect

View File

@ -248,6 +248,9 @@ github.com/aws/smithy-go v1.11.2 h1:eG/N+CcUMAvsdffgMvjMKwfyDzIkjM6pfxMJ8Mzc6mE=
github.com/aws/smithy-go v1.11.2/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM= github.com/aws/smithy-go v1.11.2/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM=
github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I= github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
@ -398,6 +401,7 @@ github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY=
@ -552,6 +556,7 @@ github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/ad
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
@ -786,6 +791,7 @@ github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzL
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
@ -999,6 +1005,7 @@ go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
@ -1012,6 +1019,8 @@ go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ=
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8=
go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw=
gocloud.dev v0.19.0/go.mod h1:SmKwiR8YwIMMJvQBKLsC3fHNyMwXLw3PMDO+VVteJMI= gocloud.dev v0.19.0/go.mod h1:SmKwiR8YwIMMJvQBKLsC3fHNyMwXLw3PMDO+VVteJMI=
golang.org/x/crypto v0.0.0-20180501155221-613d6eafa307/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180501155221-613d6eafa307/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=

View File

@ -3,11 +3,12 @@ package ssh
import ( import (
"context" "context"
"fmt" "fmt"
"log"
"os" "os"
"sync" "sync"
"github.com/edgelesssys/constellation/internal/deploy/user" "github.com/edgelesssys/constellation/internal/deploy/user"
"github.com/edgelesssys/constellation/internal/logger"
"go.uber.org/zap"
) )
// UserKey describes a user that should be created with a corresponding public SSH key. // UserKey describes a user that should be created with a corresponding public SSH key.
@ -18,14 +19,16 @@ type UserKey struct {
// Access reads SSH public keys from a channel, creates the specified users if required, and writes the public keys to the user's authorized_keys file. // Access reads SSH public keys from a channel, creates the specified users if required, and writes the public keys to the user's authorized_keys file.
type Access struct { type Access struct {
log *logger.Logger
userManager user.LinuxUserManager userManager user.LinuxUserManager
authorized map[string]bool authorized map[string]bool
mux sync.Mutex mux sync.Mutex
} }
// NewAccess creates a new Access. // NewAccess creates a new Access.
func NewAccess(userManager user.LinuxUserManager) *Access { func NewAccess(log *logger.Logger, userManager user.LinuxUserManager) *Access {
return &Access{ return &Access{
log: log,
userManager: userManager, userManager: userManager,
mux: sync.Mutex{}, mux: sync.Mutex{},
authorized: map[string]bool{}, authorized: map[string]bool{},
@ -51,7 +54,7 @@ func (s *Access) DeployAuthorizedKey(ctx context.Context, sshKey UserKey) error
if s.alreadyAuthorized(sshKey) { if s.alreadyAuthorized(sshKey) {
return nil return nil
} }
log.Printf("Trying to deploy ssh key for %s\n", sshKey.Username) s.log.With(zap.String("username", sshKey.Username)).Infof("Trying to deploy ssh key for user")
user, err := s.userManager.EnsureLinuxUserExists(ctx, sshKey.Username) user, err := s.userManager.EnsureLinuxUserExists(ctx, sshKey.Username)
if err != nil { if err != nil {
return err return err
@ -87,6 +90,6 @@ func (s *Access) DeployAuthorizedKey(ctx context.Context, sshKey UserKey) error
return err return err
} }
s.rememberAuthorized(sshKey) s.rememberAuthorized(sshKey)
log.Printf("Successfully authorized %s\n", sshKey.Username) s.log.With(zap.String("username", sshKey.Username)).Infof("Successfully authorized user")
return nil return nil
} }
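
A sketch of the refactored Access API from a caller's perspective; the username and key material are placeholders.

package main

import (
	"context"

	"github.com/edgelesssys/constellation/internal/deploy/ssh"
	"github.com/edgelesssys/constellation/internal/deploy/user"
	"github.com/edgelesssys/constellation/internal/logger"
	"github.com/spf13/afero"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	log := logger.New(logger.JSONLog, zapcore.InfoLevel)
	access := ssh.NewAccess(log, user.NewLinuxUserManager(afero.NewOsFs()))

	// DeployAuthorizedKey creates the user if needed and appends the key to
	// the user's authorized_keys file; repeat calls are deduplicated.
	key := ssh.UserKey{Username: "dev", PublicKey: "ssh-rsa AAAA... dev@example"}
	if err := access.DeployAuthorizedKey(context.Background(), key); err != nil {
		log.With(zap.Error(err)).Errorf("Deploying SSH key failed")
	}
}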

View File

@ -6,6 +6,7 @@ import (
"testing" "testing"
"github.com/edgelesssys/constellation/internal/deploy/user" "github.com/edgelesssys/constellation/internal/deploy/user"
"github.com/edgelesssys/constellation/internal/logger"
"github.com/spf13/afero" "github.com/spf13/afero"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
@ -66,6 +67,7 @@ func TestDeploySSHAuthorizedKey(t *testing.T) {
authorized["user:ssh-rsa testkey"] = true authorized["user:ssh-rsa testkey"] = true
} }
sshAccess := Access{ sshAccess := Access{
log: logger.NewTest(t),
userManager: userManager, userManager: userManager,
mux: sync.Mutex{}, mux: sync.Mutex{},
authorized: authorized, authorized: authorized,

View File

@ -1,31 +0,0 @@
// grpc_klog provides a logging interceptor for the klog logger.
package grpc_klog
import (
"context"
"google.golang.org/grpc"
"google.golang.org/grpc/peer"
"k8s.io/klog/v2"
)
// LogGRPC writes a log with the name of every gRPC call or error it receives.
// Request parameters or responses are NOT logged.
func LogGRPC(level klog.Level) func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
// log the requests method name
var addr string
peer, ok := peer.FromContext(ctx)
if ok {
addr = peer.Addr.String()
}
klog.V(level).Infof("GRPC call from peer: %q: %s", addr, info.FullMethod)
// log errors, if any
resp, err := handler(ctx, req)
if err != nil {
klog.Errorf("GRPC error: %v", err)
}
return resp, err
}
}

View File

@ -0,0 +1,17 @@
// grpclog provides logging utilities for gRPC.
package grpclog
import (
"context"
"google.golang.org/grpc/peer"
)
// PeerAddrFromContext returns a peer's address from context, or "unknown" if not found.
func PeerAddrFromContext(ctx context.Context) string {
p, ok := peer.FromContext(ctx)
if !ok {
return "unknown"
}
return p.Addr.String()
}
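
What the deleted grpc_klog interceptor did can be reproduced on top of the default logger with this helper; the interceptor below is an illustrative sketch, not part of the commit, and only PeerAddrFromContext comes from the file above.

package main

import (
	"context"

	"github.com/edgelesssys/constellation/internal/grpc/grpclog"
	"github.com/edgelesssys/constellation/internal/logger"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
	"google.golang.org/grpc"
)

// loggingInterceptor logs the peer address and method of every unary call.
func loggingInterceptor(log *logger.Logger) grpc.UnaryServerInterceptor {
	return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
		log.With(
			zap.String("peer", grpclog.PeerAddrFromContext(ctx)),
			zap.String("method", info.FullMethod),
		).Debugf("gRPC call")
		return handler(ctx, req)
	}
}

func main() {
	log := logger.New(logger.JSONLog, zapcore.DebugLevel)
	_ = grpc.NewServer(grpc.UnaryInterceptor(loggingInterceptor(log)))
}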

View File

@ -33,13 +33,16 @@ Use Fatalf() to log information about any errors that occurred and then exit the
package logger package logger
import ( import (
"fmt"
"os" "os"
"testing"
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
grpc_zap "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap" grpc_zap "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap"
grpc_ctxtags "github.com/grpc-ecosystem/go-grpc-middleware/tags" grpc_ctxtags "github.com/grpc-ecosystem/go-grpc-middleware/tags"
"go.uber.org/zap" "go.uber.org/zap"
"go.uber.org/zap/zapcore" "go.uber.org/zap/zapcore"
"go.uber.org/zap/zaptest"
"google.golang.org/grpc" "google.golang.org/grpc"
) )
@ -85,6 +88,13 @@ func New(logType LogType, logLevel zapcore.Level) *Logger {
return &Logger{logger: logger.Sugar()} return &Logger{logger: logger.Sugar()}
} }
// NewTest creates a logger for unit / integration tests.
func NewTest(t *testing.T) *Logger {
return &Logger{
logger: zaptest.NewLogger(t).Sugar().Named(fmt.Sprintf("%q", t.Name())),
}
}
// Debugf logs a message at Debug level. // Debugf logs a message at Debug level.
// Debug logs are typically voluminous, and contain detailed information on the flow of execution. // Debug logs are typically voluminous, and contain detailed information on the flow of execution.
func (l *Logger) Debugf(format string, args ...any) { func (l *Logger) Debugf(format string, args ...any) {
@ -123,7 +133,7 @@ func (l *Logger) Sync() {
// WithIncreasedLevel returns a logger with increased logging level. // WithIncreasedLevel returns a logger with increased logging level.
func (l *Logger) WithIncreasedLevel(level zapcore.Level) *Logger { func (l *Logger) WithIncreasedLevel(level zapcore.Level) *Logger {
return &Logger{logger: l.GetZapLogger().WithOptions(zap.IncreaseLevel(level)).Sugar()} return &Logger{logger: l.getZapLogger().WithOptions(zap.IncreaseLevel(level)).Sugar()}
} }
// With returns a logger with structured context. // With returns a logger with structured context.
@ -136,16 +146,16 @@ func (l *Logger) Named(name string) *Logger {
return &Logger{logger: l.logger.Named(name)} return &Logger{logger: l.logger.Named(name)}
} }
// GetZapLogger returns the underlying zap logger. // ReplaceGRPCLogger replaces grpc's internal logger with the given logger.
func (l *Logger) GetZapLogger() *zap.Logger { func (l *Logger) ReplaceGRPCLogger() {
return l.logger.Desugar() grpc_zap.ReplaceGrpcLoggerV2(l.logger.Desugar())
} }
// GetServerUnaryInterceptor returns a gRPC server option for intercepting unary gRPC logs. // GetServerUnaryInterceptor returns a gRPC server option for intercepting unary gRPC logs.
func (l *Logger) GetServerUnaryInterceptor() grpc.ServerOption { func (l *Logger) GetServerUnaryInterceptor() grpc.ServerOption {
return grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer( return grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
grpc_ctxtags.UnaryServerInterceptor(), grpc_ctxtags.UnaryServerInterceptor(),
grpc_zap.UnaryServerInterceptor(l.GetZapLogger()), grpc_zap.UnaryServerInterceptor(l.getZapLogger()),
)) ))
} }
@ -153,20 +163,25 @@ func (l *Logger) GetServerUnaryInterceptor() grpc.ServerOption {
func (l *Logger) GetServerStreamInterceptor() grpc.ServerOption { func (l *Logger) GetServerStreamInterceptor() grpc.ServerOption {
return grpc.StreamInterceptor(grpc_middleware.ChainStreamServer( return grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(
grpc_ctxtags.StreamServerInterceptor(), grpc_ctxtags.StreamServerInterceptor(),
grpc_zap.StreamServerInterceptor(l.GetZapLogger()), grpc_zap.StreamServerInterceptor(l.getZapLogger()),
)) ))
} }
// GetClientUnaryInterceptor returns a gRPC client option for intercepting unary gRPC logs. // GetClientUnaryInterceptor returns a gRPC client option for intercepting unary gRPC logs.
func (l *Logger) GetClientUnaryInterceptor() grpc.DialOption { func (l *Logger) GetClientUnaryInterceptor() grpc.DialOption {
return grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient( return grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(
grpc_zap.UnaryClientInterceptor(l.GetZapLogger()), grpc_zap.UnaryClientInterceptor(l.getZapLogger()),
)) ))
} }
// GetClientStreamInterceptor returns a gRPC client option for intercepting stream gRPC logs. // GetClientStreamInterceptor returns a gRPC client option for intercepting stream gRPC logs.
func (l *Logger) GetClientStreamInterceptor() grpc.DialOption { func (l *Logger) GetClientStreamInterceptor() grpc.DialOption {
return grpc.WithStreamInterceptor(grpc_middleware.ChainStreamClient( return grpc.WithStreamInterceptor(grpc_middleware.ChainStreamClient(
grpc_zap.StreamClientInterceptor(l.GetZapLogger()), grpc_zap.StreamClientInterceptor(l.getZapLogger()),
)) ))
} }
// getZapLogger returns the underlying zap logger.
func (l *Logger) getZapLogger() *zap.Logger {
return l.logger.Desugar()
}
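
Finally, a sketch of the new test-logger entry point in use; the package and test names are hypothetical.

package example

import (
	"testing"

	"github.com/edgelesssys/constellation/internal/logger"
	"go.uber.org/zap"
)

func TestExample(t *testing.T) {
	// NewTest routes output through t, so messages are tied to this test and
	// only printed when the test fails or -v is set.
	log := logger.NewTest(t)
	log.Named("subsystem").With(zap.String("node", "test")).Infof("Hello from %s", t.Name())
}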

View File

@ -9,13 +9,13 @@ import (
"github.com/edgelesssys/constellation/internal/constants" "github.com/edgelesssys/constellation/internal/constants"
"github.com/edgelesssys/constellation/internal/file" "github.com/edgelesssys/constellation/internal/file"
"github.com/edgelesssys/constellation/internal/grpc/grpc_klog" "github.com/edgelesssys/constellation/internal/logger"
"github.com/edgelesssys/constellation/kms/server/kmsapi" "github.com/edgelesssys/constellation/kms/server/kmsapi"
"github.com/edgelesssys/constellation/kms/server/kmsapi/kmsproto" "github.com/edgelesssys/constellation/kms/server/kmsapi/kmsproto"
"github.com/edgelesssys/constellation/kms/server/setup" "github.com/edgelesssys/constellation/kms/server/setup"
"github.com/spf13/afero" "github.com/spf13/afero"
"go.uber.org/zap" "go.uber.org/zap"
"k8s.io/klog/v2" "go.uber.org/zap/zapcore"
"google.golang.org/grpc" "google.golang.org/grpc"
) )
@ -24,43 +24,42 @@ func main() {
port := flag.String("port", "9000", "Port gRPC server listens on") port := flag.String("port", "9000", "Port gRPC server listens on")
masterSecretPath := flag.String("master-secret", "/constellation/constellation-mastersecret.base64", "Path to the Constellation master secret") masterSecretPath := flag.String("master-secret", "/constellation/constellation-mastersecret.base64", "Path to the Constellation master secret")
klog.InitFlags(nil)
flag.Parse() flag.Parse()
defer klog.Flush()
klog.V(2).Infof("\nConstellation Key Management Service\nVersion: %s", constants.VersionInfo) log := logger.New(logger.JSONLog, zapcore.InfoLevel)
log.With(zap.String("version", constants.VersionInfo)).Infof("Constellation Key Management Service")
masterKey, err := readMainSecret(*masterSecretPath) masterKey, err := readMainSecret(*masterSecretPath)
if err != nil { if err != nil {
klog.Exitf("Failed to read master secret: %v", err) log.With(zap.Error(err)).Fatalf("Failed to read master secret")
} }
conKMS, err := setup.SetUpKMS(context.Background(), setup.NoStoreURI, setup.ClusterKMSURI) conKMS, err := setup.SetUpKMS(context.Background(), setup.NoStoreURI, setup.ClusterKMSURI)
if err != nil { if err != nil {
klog.Exitf("Failed to setup KMS: %v", err) log.With(zap.Error(err)).Fatalf("Failed to setup KMS")
} }
if err := conKMS.CreateKEK(context.Background(), "Constellation", masterKey); err != nil { if err := conKMS.CreateKEK(context.Background(), "Constellation", masterKey); err != nil {
klog.Exitf("Failed to create KMS KEK from MasterKey: %v", err) log.With(zap.Error(err)).Fatalf("Failed to create KMS KEK from MasterKey")
} }
lis, err := net.Listen("tcp", net.JoinHostPort("", *port)) lis, err := net.Listen("tcp", net.JoinHostPort("", *port))
if err != nil { if err != nil {
klog.Exitf("Failed to listen: %v", err) log.With(zap.Error(err)).Fatalf("Failed to listen")
} }
srv := kmsapi.New(&zap.Logger{}, conKMS) srv := kmsapi.New(log.Named("server"), conKMS)
log.Named("gRPC").WithIncreasedLevel(zapcore.WarnLevel).ReplaceGRPCLogger()
// TODO: Launch server with aTLS to allow attestation for clients. // TODO: Launch server with aTLS to allow attestation for clients.
grpcServer := grpc.NewServer( grpcServer := grpc.NewServer(log.Named("gRPC").GetServerUnaryInterceptor())
grpc.UnaryInterceptor(grpc_klog.LogGRPC(2)),
)
kmsproto.RegisterAPIServer(grpcServer, srv) kmsproto.RegisterAPIServer(grpcServer, srv)
klog.V(2).Infof("Starting key management service on %s", lis.Addr().String()) log.Infof("Starting key management service on %s", lis.Addr().String())
if err := grpcServer.Serve(lis); err != nil { if err := grpcServer.Serve(lis); err != nil {
klog.Exitf("Failed to serve: %s", err) log.With(zap.Error(err)).Fatalf("Failed to serve")
} }
} }
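
The hunk above shows the migration pattern in miniature: klog's format-string interpolation becomes a constant message with the error attached as a structured field. A minimal sketch of the difference, using only APIs present in this commit:

package main

import (
	"errors"

	"github.com/edgelesssys/constellation/internal/logger"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	log := logger.New(logger.JSONLog, zapcore.InfoLevel)
	err := errors.New("address already in use") // stand-in error

	// Before: klog.Exitf("Failed to listen: %v", err) folded the error
	// into the message string. After: the message stays constant and the
	// error is emitted as a separate JSON field, then the process exits.
	log.With(zap.Error(err)).Fatalf("Failed to listen")
}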

View File

@ -4,46 +4,49 @@ package kmsapi
import ( import (
"context" "context"
"github.com/edgelesssys/constellation/internal/grpc/grpclog"
"github.com/edgelesssys/constellation/internal/logger"
"github.com/edgelesssys/constellation/kms/kms" "github.com/edgelesssys/constellation/kms/kms"
"github.com/edgelesssys/constellation/kms/server/kmsapi/kmsproto" "github.com/edgelesssys/constellation/kms/server/kmsapi/kmsproto"
"go.uber.org/zap" "go.uber.org/zap"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/status" "google.golang.org/grpc/status"
"k8s.io/klog/v2"
) )
// API implements an encryption key management API server, composed of a logger, a CloudKMS backend, and the unimplemented proto server. // API implements an encryption key management API server, composed of a logger, a CloudKMS backend, and the unimplemented proto server.
type API struct { type API struct {
logger *zap.Logger log *logger.Logger
conKMS kms.CloudKMS conKMS kms.CloudKMS
kmsproto.UnimplementedAPIServer kmsproto.UnimplementedAPIServer
} }
// New creates a new API. // New creates a new API.
func New(logger *zap.Logger, conKMS kms.CloudKMS) *API { func New(log *logger.Logger, conKMS kms.CloudKMS) *API {
return &API{ return &API{
logger: logger, log: log,
conKMS: conKMS, conKMS: conKMS,
} }
} }
// GetDataKey returns a data key. // GetDataKey returns a data key.
func (a *API) GetDataKey(ctx context.Context, in *kmsproto.GetDataKeyRequest) (*kmsproto.GetDataKeyResponse, error) { func (a *API) GetDataKey(ctx context.Context, in *kmsproto.GetDataKeyRequest) (*kmsproto.GetDataKeyResponse, error) {
log := a.log.With("peerAddress", grpclog.PeerAddrFromContext(ctx))
// Error on 0 key length // Error on 0 key length
if in.Length == 0 { if in.Length == 0 {
klog.Error("GetDataKey: requested key length is zero") log.Errorf("Requested key length is zero")
return nil, status.Error(codes.InvalidArgument, "can't derive key with length zero") return nil, status.Error(codes.InvalidArgument, "can't derive key with length zero")
} }
// Error on empty DataKeyId // Error on empty DataKeyId
if in.DataKeyId == "" { if in.DataKeyId == "" {
klog.Error("GetDataKey: no data key ID specified") log.Errorf("No data key ID specified")
return nil, status.Error(codes.InvalidArgument, "no data key ID specified") return nil, status.Error(codes.InvalidArgument, "no data key ID specified")
} }
key, err := a.conKMS.GetDEK(ctx, "Constellation", "key-"+in.DataKeyId, int(in.Length)) key, err := a.conKMS.GetDEK(ctx, "Constellation", "key-"+in.DataKeyId, int(in.Length))
if err != nil { if err != nil {
klog.Errorf("GetDataKey: failed to get data key: %v", err) log.With(zap.Error(err)).Errorf("Failed to get data key")
return nil, status.Errorf(codes.Internal, "%v", err) return nil, status.Errorf(codes.Internal, "%v", err)
} }
return &kmsproto.GetDataKeyResponse{DataKey: key}, nil return &kmsproto.GetDataKeyResponse{DataKey: key}, nil

View File

@ -5,18 +5,20 @@ import (
"errors" "errors"
"testing" "testing"
"github.com/edgelesssys/constellation/internal/logger"
"github.com/edgelesssys/constellation/kms/server/kmsapi/kmsproto" "github.com/edgelesssys/constellation/kms/server/kmsapi/kmsproto"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"go.uber.org/zap/zaptest"
) )
func TestGetDataKey(t *testing.T) { func TestGetDataKey(t *testing.T) {
assert := assert.New(t) assert := assert.New(t)
require := require.New(t) require := require.New(t)
log := logger.NewTest(t)
kms := &stubKMS{derivedKey: []byte{0x0, 0x1, 0x2, 0x3, 0x4, 0x5}} kms := &stubKMS{derivedKey: []byte{0x0, 0x1, 0x2, 0x3, 0x4, 0x5}}
api := New(zaptest.NewLogger(t), kms) api := New(log, kms)
res, err := api.GetDataKey(context.Background(), &kmsproto.GetDataKeyRequest{DataKeyId: "1", Length: 32}) res, err := api.GetDataKey(context.Background(), &kmsproto.GetDataKeyRequest{DataKeyId: "1", Length: 32})
require.NoError(err) require.NoError(err)
@ -33,7 +35,7 @@ func TestGetDataKey(t *testing.T) {
assert.Nil(res) assert.Nil(res)
// Test derive key error // Test derive key error
api = New(zaptest.NewLogger(t), &stubKMS{deriveKeyErr: errors.New("error")}) api = New(log, &stubKMS{deriveKeyErr: errors.New("error")})
res, err = api.GetDataKey(context.Background(), &kmsproto.GetDataKeyRequest{DataKeyId: "1", Length: 32}) res, err = api.GetDataKey(context.Background(), &kmsproto.GetDataKeyRequest{DataKeyId: "1", Length: 32})
assert.Error(err) assert.Error(err)
assert.Nil(res) assert.Nil(res)
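
Since logger.NewTest replaces zaptest.NewLogger here, log output keeps flowing through t and only surfaces on failure or with -v. Any component that takes a *logger.Logger can be tested the same way; a hypothetical extra case for the zero-length validation path might look like:

package kmsapi

import (
	"context"
	"testing"

	"github.com/edgelesssys/constellation/internal/logger"
	"github.com/edgelesssys/constellation/kms/server/kmsapi/kmsproto"
	"github.com/stretchr/testify/assert"
)

func TestGetDataKeyZeroLength(t *testing.T) {
	assert := assert.New(t)

	// NewTest routes log output through t, so it stays quiet unless
	// the test fails or is run with -v.
	api := New(logger.NewTest(t), &stubKMS{derivedKey: []byte{0x0}})

	// Length 0 must be rejected before the KMS backend is consulted.
	res, err := api.GetDataKey(context.Background(), &kmsproto.GetDataKeyRequest{DataKeyId: "1", Length: 0})
	assert.Error(err)
	assert.Nil(res)
}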

View File

@ -4,8 +4,6 @@ import (
"context" "context"
"flag" "flag"
"fmt" "fmt"
"log"
"os"
"path/filepath" "path/filepath"
"strings" "strings"
"time" "time"
@ -17,10 +15,14 @@ import (
"github.com/edgelesssys/constellation/internal/attestation/gcp" "github.com/edgelesssys/constellation/internal/attestation/gcp"
"github.com/edgelesssys/constellation/internal/attestation/qemu" "github.com/edgelesssys/constellation/internal/attestation/qemu"
"github.com/edgelesssys/constellation/internal/attestation/vtpm" "github.com/edgelesssys/constellation/internal/attestation/vtpm"
"github.com/edgelesssys/constellation/internal/constants"
"github.com/edgelesssys/constellation/internal/logger"
"github.com/edgelesssys/constellation/state/keyservice" "github.com/edgelesssys/constellation/state/keyservice"
"github.com/edgelesssys/constellation/state/mapper" "github.com/edgelesssys/constellation/state/mapper"
"github.com/edgelesssys/constellation/state/setup" "github.com/edgelesssys/constellation/state/setup"
"github.com/spf13/afero" "github.com/spf13/afero"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
) )
const ( const (
@ -34,7 +36,9 @@ var csp = flag.String("csp", "", "Cloud Service Provider the image is running on
func main() { func main() {
flag.Parse() flag.Parse()
log.Printf("Starting disk-mapper for csp %q\n", *csp) log := logger.New(logger.JSONLog, zapcore.InfoLevel)
log.With(zap.String("version", constants.VersionInfo), zap.String("cloudProvider", *csp)).
Infof("Starting disk-mapper")
// set up metadata API and quote issuer for aTLS connections // set up metadata API and quote issuer for aTLS connections
var err error var err error
@ -47,7 +51,7 @@ func main() {
diskPath, diskPathErr = filepath.EvalSymlinks(azureStateDiskPath) diskPath, diskPathErr = filepath.EvalSymlinks(azureStateDiskPath)
metadata, err = azurecloud.NewMetadata(context.Background()) metadata, err = azurecloud.NewMetadata(context.Background())
if err != nil { if err != nil {
exit(err) log.With(zap.Error).Fatalf("Failed to create Azure metadata API")
} }
issuer = azure.NewIssuer() issuer = azure.NewIssuer()
@ -56,34 +60,35 @@ func main() {
issuer = gcp.NewIssuer() issuer = gcp.NewIssuer()
gcpClient, err := gcpcloud.NewClient(context.Background()) gcpClient, err := gcpcloud.NewClient(context.Background())
if err != nil { if err != nil {
exit(err) log.With(zap.Error).Fatalf("Failed to create GCP client")
} }
metadata = gcpcloud.New(gcpClient) metadata = gcpcloud.New(gcpClient)
case "qemu": case "qemu":
diskPath = qemuStateDiskPath diskPath = qemuStateDiskPath
issuer = qemu.NewIssuer() issuer = qemu.NewIssuer()
fmt.Fprintf(os.Stderr, "warning: cloud services are not supported for csp %q\n", *csp) log.Warnf("cloud services are not supported on QEMU")
metadata = &core.ProviderMetadataFake{} metadata = &core.ProviderMetadataFake{}
default: default:
diskPathErr = fmt.Errorf("csp %q is not supported by Constellation", *csp) diskPathErr = fmt.Errorf("csp %q is not supported by Constellation", *csp)
} }
if diskPathErr != nil { if diskPathErr != nil {
exit(fmt.Errorf("unable to determine state disk path: %w", diskPathErr)) log.With(zap.Error(diskPathErr)).Fatalf("Unable to determine state disk path")
} }
// initialize device mapper // initialize device mapper
mapper, err := mapper.New(diskPath) mapper, err := mapper.New(diskPath)
if err != nil { if err != nil {
exit(err) log.With(zap.Error(err)).Fatalf("Failed to initialize device mapper")
} }
defer mapper.Close() defer mapper.Close()
setupManger := setup.New( setupManger := setup.New(
log.Named("setupManager"),
*csp, *csp,
afero.Afero{Fs: afero.NewOsFs()}, afero.Afero{Fs: afero.NewOsFs()},
keyservice.New(issuer, metadata, 20*time.Second), // try to request a key every 20 seconds keyservice.New(log.Named("keyService"), issuer, metadata, 20*time.Second), // try to request a key every 20 seconds
mapper, mapper,
setup.DiskMounter{}, setup.DiskMounter{},
vtpm.OpenVTPM, vtpm.OpenVTPM,
@ -95,13 +100,7 @@ func main() {
} else { } else {
err = setupManger.PrepareNewDisk() err = setupManger.PrepareNewDisk()
} }
exit(err)
}
func exit(err error) {
if err != nil { if err != nil {
fmt.Fprintln(os.Stderr, err.Error()) log.With(zap.Error(err)).Fatalf("Failed to prepare state disk")
os.Exit(1)
} }
os.Exit(0)
} }

View File

@ -3,7 +3,6 @@ package keyservice
import ( import (
"context" "context"
"errors" "errors"
"log"
"net" "net"
"sync" "sync"
"time" "time"
@ -12,7 +11,9 @@ import (
"github.com/edgelesssys/constellation/coordinator/core" "github.com/edgelesssys/constellation/coordinator/core"
"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto" "github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
"github.com/edgelesssys/constellation/internal/grpc/atlscredentials" "github.com/edgelesssys/constellation/internal/grpc/atlscredentials"
"github.com/edgelesssys/constellation/internal/logger"
"github.com/edgelesssys/constellation/state/keyservice/keyproto" "github.com/edgelesssys/constellation/state/keyservice/keyproto"
"go.uber.org/zap"
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials"
@ -21,6 +22,7 @@ import (
// KeyAPI is the interface called by the Coordinator or an admin during restart of a node. // KeyAPI is the interface called by the Coordinator or an admin during restart of a node.
type KeyAPI struct { type KeyAPI struct {
log *logger.Logger
mux sync.Mutex mux sync.Mutex
metadata core.ProviderMetadata metadata core.ProviderMetadata
issuer core.QuoteIssuer issuer core.QuoteIssuer
@ -31,8 +33,9 @@ type KeyAPI struct {
} }
// New initializes a KeyAPI with the given parameters. // New initializes a KeyAPI with the given parameters.
func New(issuer core.QuoteIssuer, metadata core.ProviderMetadata, timeout time.Duration) *KeyAPI { func New(log *logger.Logger, issuer core.QuoteIssuer, metadata core.ProviderMetadata, timeout time.Duration) *KeyAPI {
return &KeyAPI{ return &KeyAPI{
log: log,
metadata: metadata, metadata: metadata,
issuer: issuer, issuer: issuer,
keyReceived: make(chan struct{}, 1), keyReceived: make(chan struct{}, 1),
@ -71,7 +74,7 @@ func (a *KeyAPI) WaitForDecryptionKey(uuid, listenAddr string) ([]byte, error) {
} }
defer listener.Close() defer listener.Close()
log.Printf("Waiting for decryption key. Listening on: %s", listener.Addr().String()) a.log.Infof("Waiting for decryption key. Listening on: %s", listener.Addr().String())
go server.Serve(listener) go server.Serve(listener)
defer server.GracefulStop() defer server.GracefulStop()
@ -118,7 +121,7 @@ func (a *KeyAPI) requestKey(uuid string, credentials credentials.TransportCreden
// list available Coordinators // list available Coordinators
endpoints, _ := core.CoordinatorEndpoints(context.Background(), a.metadata) endpoints, _ := core.CoordinatorEndpoints(context.Background(), a.metadata)
log.Printf("Sending a key request to available Coordinators: %v", endpoints) a.log.With(zap.Strings("endpoints", endpoints)).Infof("Sending a key request to available Coordinators")
// notify all available Coordinators to send a key to the node // notify all available Coordinators to send a key to the node
// any errors encountered here will be ignored, and the calls retried after a timeout // any errors encountered here will be ignored, and the calls retried after a timeout
for _, endpoint := range endpoints { for _, endpoint := range endpoints {
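
Putting the new constructor signature together, a restarting node would drive the KeyAPI roughly like this (a sketch; the listen address and disk UUID are placeholders):

package recovery

import (
	"time"

	"github.com/edgelesssys/constellation/coordinator/core"
	"github.com/edgelesssys/constellation/internal/logger"
	"github.com/edgelesssys/constellation/state/keyservice"
)

// waitForKey is a hypothetical wrapper around the flow above: it retries
// key requests against the Coordinators every 20 seconds until one
// pushes a matching disk decryption key to the listener.
func waitForKey(log *logger.Logger, issuer core.QuoteIssuer, metadata core.ProviderMetadata, diskUUID string) ([]byte, error) {
	api := keyservice.New(log.Named("keyService"), issuer, metadata, 20*time.Second)
	return api.WaitForDecryptionKey(diskUUID, ":9000") // placeholder listen address
}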

View File

@ -12,6 +12,7 @@ import (
"github.com/edgelesssys/constellation/coordinator/role" "github.com/edgelesssys/constellation/coordinator/role"
"github.com/edgelesssys/constellation/internal/atls" "github.com/edgelesssys/constellation/internal/atls"
"github.com/edgelesssys/constellation/internal/grpc/atlscredentials" "github.com/edgelesssys/constellation/internal/grpc/atlscredentials"
"github.com/edgelesssys/constellation/internal/logger"
"github.com/edgelesssys/constellation/internal/oid" "github.com/edgelesssys/constellation/internal/oid"
"github.com/edgelesssys/constellation/state/keyservice/keyproto" "github.com/edgelesssys/constellation/state/keyservice/keyproto"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
@ -85,6 +86,7 @@ func TestRequestKeyLoop(t *testing.T) {
} }
keyWaiter := &KeyAPI{ keyWaiter := &KeyAPI{
log: logger.NewTest(t),
metadata: stubMetadata{listResponse: tc.listResponse}, metadata: stubMetadata{listResponse: tc.listResponse},
keyReceived: keyReceived, keyReceived: keyReceived,
timeout: 500 * time.Millisecond, timeout: 500 * time.Millisecond,
@ -138,6 +140,7 @@ func TestPushStateDiskKey(t *testing.T) {
t.Run(name, func(t *testing.T) { t.Run(name, func(t *testing.T) {
assert := assert.New(t) assert := assert.New(t)
tc.testAPI.log = logger.NewTest(t)
_, err := tc.testAPI.PushStateDiskKey(context.Background(), tc.request) _, err := tc.testAPI.PushStateDiskKey(context.Background(), tc.request)
if tc.wantErr { if tc.wantErr {
assert.Error(err) assert.Error(err)
@ -150,7 +153,7 @@ func TestPushStateDiskKey(t *testing.T) {
} }
func TestResetKey(t *testing.T) { func TestResetKey(t *testing.T) {
api := New(nil, nil, time.Second) api := New(logger.NewTest(t), nil, nil, time.Second)
api.key = []byte{0x1, 0x2, 0x3} api.key = []byte{0x1, 0x2, 0x3}
api.ResetKey() api.ResetKey()

View File

@ -3,7 +3,6 @@ package setup
import ( import (
"crypto/rand" "crypto/rand"
"errors" "errors"
"log"
"net" "net"
"os" "os"
"path/filepath" "path/filepath"
@ -13,7 +12,9 @@ import (
"github.com/edgelesssys/constellation/coordinator/nodestate" "github.com/edgelesssys/constellation/coordinator/nodestate"
"github.com/edgelesssys/constellation/internal/attestation/vtpm" "github.com/edgelesssys/constellation/internal/attestation/vtpm"
"github.com/edgelesssys/constellation/internal/file" "github.com/edgelesssys/constellation/internal/file"
"github.com/edgelesssys/constellation/internal/logger"
"github.com/spf13/afero" "github.com/spf13/afero"
"go.uber.org/zap"
) )
const ( const (
@ -27,6 +28,7 @@ const (
// SetupManager handles formatting, mapping, mounting and unmounting of state disks. // SetupManager handles formatting, mapping, mounting and unmounting of state disks.
type SetupManager struct { type SetupManager struct {
log *logger.Logger
csp string csp string
fs afero.Afero fs afero.Afero
keyWaiter KeyWaiter keyWaiter KeyWaiter
@ -36,8 +38,9 @@ type SetupManager struct {
} }
// New initializes a SetupManager with the given parameters. // New initializes a SetupManager with the given parameters.
func New(csp string, fs afero.Afero, keyWaiter KeyWaiter, mapper DeviceMapper, mounter Mounter, openTPM vtpm.TPMOpenFunc) *SetupManager { func New(log *logger.Logger, csp string, fs afero.Afero, keyWaiter KeyWaiter, mapper DeviceMapper, mounter Mounter, openTPM vtpm.TPMOpenFunc) *SetupManager {
return &SetupManager{ return &SetupManager{
log: log,
csp: csp, csp: csp,
fs: fs, fs: fs,
keyWaiter: keyWaiter, keyWaiter: keyWaiter,
@ -50,7 +53,7 @@ func New(csp string, fs afero.Afero, keyWaiter KeyWaiter, mapper DeviceMapper, m
// PrepareExistingDisk requests and waits for a decryption key to remap the encrypted state disk. // PrepareExistingDisk requests and waits for a decryption key to remap the encrypted state disk.
// Once the disk is mapped, the function taints the node as initialized by updating its PCRs. // Once the disk is mapped, the function taints the node as initialized by updating its PCRs.
func (s *SetupManager) PrepareExistingDisk() error { func (s *SetupManager) PrepareExistingDisk() error {
log.Println("Preparing existing state disk") s.log.Infof("Preparing existing state disk")
uuid := s.mapper.DiskUUID() uuid := s.mapper.DiskUUID()
getKey: getKey:
@ -61,6 +64,7 @@ getKey:
if err := s.mapper.MapDisk(stateDiskMappedName, string(passphrase)); err != nil { if err := s.mapper.MapDisk(stateDiskMappedName, string(passphrase)); err != nil {
// retry key fetching if disk mapping fails // retry key fetching if disk mapping fails
s.log.With(zap.Error(err)).Errorf("Failed to map state disk, retrying...")
s.keyWaiter.ResetKey() s.keyWaiter.ResetKey()
goto getKey goto getKey
} }
@ -88,7 +92,7 @@ getKey:
// PrepareNewDisk prepares an instance's state disk by formatting the disk as a LUKS device using a random passphrase. // PrepareNewDisk prepares an instance's state disk by formatting the disk as a LUKS device using a random passphrase.
func (s *SetupManager) PrepareNewDisk() error { func (s *SetupManager) PrepareNewDisk() error {
log.Println("Preparing new state disk") s.log.Infof("Preparing new state disk")
// generate and save temporary passphrase // generate and save temporary passphrase
if err := s.fs.MkdirAll(keyPath, os.ModePerm); err != nil { if err := s.fs.MkdirAll(keyPath, os.ModePerm); err != nil {
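
The constructor change is mechanical: the logger moves to the front and every other argument keeps its position. A sketch of a caller, mirroring the disk-mapper wiring above, with the CSP value as a placeholder:

package disk

import (
	"github.com/edgelesssys/constellation/internal/attestation/vtpm"
	"github.com/edgelesssys/constellation/internal/logger"
	"github.com/edgelesssys/constellation/state/setup"
	"github.com/spf13/afero"
	"go.uber.org/zap/zapcore"
)

// newSetupManager is a hypothetical helper; "gcp" stands in for the
// detected CSP.
func newSetupManager(keyWaiter setup.KeyWaiter, mapper setup.DeviceMapper) *setup.SetupManager {
	log := logger.New(logger.JSONLog, zapcore.InfoLevel)
	return setup.New(
		log.Named("setupManager"), // logger now leads the argument list
		"gcp",
		afero.Afero{Fs: afero.NewOsFs()},
		keyWaiter,
		mapper,
		setup.DiskMounter{},
		vtpm.OpenVTPM,
	)
}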

View File

@ -11,6 +11,7 @@ import (
"github.com/edgelesssys/constellation/coordinator/nodestate" "github.com/edgelesssys/constellation/coordinator/nodestate"
"github.com/edgelesssys/constellation/internal/attestation/vtpm" "github.com/edgelesssys/constellation/internal/attestation/vtpm"
"github.com/edgelesssys/constellation/internal/file" "github.com/edgelesssys/constellation/internal/file"
"github.com/edgelesssys/constellation/internal/logger"
"github.com/spf13/afero" "github.com/spf13/afero"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
@ -107,7 +108,15 @@ func TestPrepareExistingDisk(t *testing.T) {
require.NoError(t, handler.WriteJSON(stateInfoPath, nodestate.NodeState{OwnerID: []byte("ownerID"), ClusterID: []byte("clusterID")}, file.OptMkdirAll)) require.NoError(t, handler.WriteJSON(stateInfoPath, nodestate.NodeState{OwnerID: []byte("ownerID"), ClusterID: []byte("clusterID")}, file.OptMkdirAll))
} }
setupManager := New("test", tc.fs, tc.keyWaiter, tc.mapper, tc.mounter, tc.openTPM) setupManager := New(
logger.NewTest(t),
"test",
tc.fs,
tc.keyWaiter,
tc.mapper,
tc.mounter,
tc.openTPM,
)
err := setupManager.PrepareExistingDisk() err := setupManager.PrepareExistingDisk()
if tc.wantErr { if tc.wantErr {
@ -167,7 +176,7 @@ func TestPrepareNewDisk(t *testing.T) {
t.Run(name, func(t *testing.T) { t.Run(name, func(t *testing.T) {
assert := assert.New(t) assert := assert.New(t)
setupManager := New("test", tc.fs, nil, tc.mapper, nil, nil) setupManager := New(logger.NewTest(t), "test", tc.fs, nil, tc.mapper, nil, nil)
err := setupManager.PrepareNewDisk() err := setupManager.PrepareNewDisk()
if tc.wantErr { if tc.wantErr {
@ -233,7 +242,7 @@ func TestReadInitSecrets(t *testing.T) {
require.NoError(handler.WriteJSON("/tmp/test-state.json", state, file.OptMkdirAll)) require.NoError(handler.WriteJSON("/tmp/test-state.json", state, file.OptMkdirAll))
} }
setupManager := New("test", tc.fs, nil, nil, nil, nil) setupManager := New(logger.NewTest(t), "test", tc.fs, nil, nil, nil, nil)
ownerID, clusterID, err := setupManager.readInitSecrets("/tmp/test-state.json") ownerID, clusterID, err := setupManager.readInitSecrets("/tmp/test-state.json")
if tc.wantErr { if tc.wantErr {

View File

@ -14,6 +14,7 @@ import (
"github.com/edgelesssys/constellation/coordinator/core" "github.com/edgelesssys/constellation/coordinator/core"
"github.com/edgelesssys/constellation/internal/atls" "github.com/edgelesssys/constellation/internal/atls"
"github.com/edgelesssys/constellation/internal/grpc/atlscredentials" "github.com/edgelesssys/constellation/internal/grpc/atlscredentials"
"github.com/edgelesssys/constellation/internal/logger"
"github.com/edgelesssys/constellation/internal/oid" "github.com/edgelesssys/constellation/internal/oid"
"github.com/edgelesssys/constellation/state/keyservice" "github.com/edgelesssys/constellation/state/keyservice"
"github.com/edgelesssys/constellation/state/keyservice/keyproto" "github.com/edgelesssys/constellation/state/keyservice/keyproto"
@ -85,7 +86,12 @@ func TestKeyAPI(t *testing.T) {
apiAddr := listener.Addr().String() apiAddr := listener.Addr().String()
listener.Close() listener.Close()
api := keyservice.New(atls.NewFakeIssuer(oid.Dummy{}), &core.ProviderMetadataFake{}, 20*time.Second) api := keyservice.New(
logger.NewTest(t),
atls.NewFakeIssuer(oid.Dummy{}),
&core.ProviderMetadataFake{},
20*time.Second,
)
// send a key to the server // send a key to the server
go func() { go func() {