rewrote packages

keyservice
joinservice
upgrade-agent
measurement-reader
debugd
disk-mapper

rewrote joinservice main

rewrote some unit tests

rewrote upgrade-agent + some grpc functions

rewrote measurement-reader

rewrote debugd

removed unused import

removed forgotten zap reference in measurement-reader

rewrote disk-mapper + tests

rewrote packages

verify
disk-mapper
malicious join
bootstrapper
attestationconfigapi
versionapi
internal/cloud/azure
disk-mapper tests
image/upload/internal/cmd

rewrote verify (WIP with loglevel increase)

rewrote forgotten zap references in disk-mapper

rewrote malicious join

rewrote bootstrapper

rewrote parts of internal/

rewrote attestationconfigapi (WIP)

rewrote versionapi cli

rewrote internal/cloud/azure

rewrote disk-mapper tests (untested by me rn)

rewrote image/upload/internal/cmd

removed forgotten zap references in verify/cmd

rewrote packages

hack/oci-pin
hack/qemu-metadata-api
debugd/internal/debugd/deploy
hack/bazel-deps-mirror
cli/internal/cmd
cli-k8s-compatibility

rewrote hack/qemu-metadata-api/server

rewrote debugd/internal/debugd/deploy

rewrote hack/bazel-deps-mirror

rewrote rest of hack/qemu-metadata-api

rewrote forgotten zap references in joinservice server

rewrote cli/internal/cmd

rewrote cli-k8s-compatibility

rewrote packages

internal/staticupload
e2e/internal/upgrade
internal/constellation/helm
internal/attestation/aws/snp
internal/attestation/azure/trustedlaunch
joinservice/internal/certcache/amkds

some missed unit tests

rewrote e2e/internal/upgrade

rewrote internal/constellation/helm

internal/attestation/aws/snp

internal/attestation/azure/trustedlaunch

joinservice/internal/certcache/amkds

search and replace test logging over all remaining *_test.go files
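
For reference, the search-and-replace applied to the test files swaps the old test logger constructor for a plain slog logger writing to the test output:

old: log: logger.NewTest(t)
new: log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil))

The diff below only shows logger.TestWriter being constructed with a *testing.T; a minimal sketch of such a writer, assuming it simply forwards handler output to the test log, could look like this:

package logger

import "testing"

// TestWriter is an io.Writer that forwards log output to the test log.
// Assumed implementation; only the TestWriter{T: t} construction appears in the diff.
type TestWriter struct {
	T *testing.T
}

func (w TestWriter) Write(p []byte) (int, error) {
	w.T.Log(string(p))
	return len(p), nil
}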
miampf 2023-12-29 15:18:59 +01:00
parent 48d5a157dd
commit f16ccf5679
No known key found for this signature in database
GPG key ID: 376EAC0E5307A669
158 changed files with 3400 additions and 1278 deletions

View file

@ -7,14 +7,14 @@ SPDX-License-Identifier: AGPL-3.0-only
package main
import (
"context"
"flag"
"io"
"os"
"strconv"
"context"
"flag"
"io"
"log/slog"
"os"
"strconv"
"github.com/spf13/afero"
"go.uber.org/zap"
"github.com/spf13/afero"
"github.com/edgelesssys/constellation/v2/bootstrapper/internal/kubernetes"
"github.com/edgelesssys/constellation/v2/bootstrapper/internal/kubernetes/k8sapi"
@ -37,25 +37,25 @@ import (
)
const (
// constellationCSP is the environment variable stating which Cloud Service Provider Constellation is running on.
constellationCSP = "CONSTEL_CSP"
// constellationCSP is the environment variable stating which Cloud Service Provider Constellation is running on.
constellationCSP = "CONSTEL_CSP"
)
func main() {
gRPCDebug := flag.Bool("debug", false, "Enable gRPC debug logging")
verbosity := flag.Int("v", 0, logger.CmdLineVerbosityDescription)
flag.Parse()
log := logger.New(logger.JSONLog, logger.VerbosityFromInt(*verbosity)).Named("bootstrapper")
defer log.Sync()
gRPCDebug := flag.Bool("debug", false, "Enable gRPC debug logging")
verbosity := flag.Int("v", 0, logger.CmdLineVerbosityDescription)
flag.Parse()
log := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: logger.VerbosityFromInt(*verbosity)})).WithGroup("bootstrapper")
if *gRPCDebug {
log.Named("gRPC").ReplaceGRPCLogger()
} else {
log.Named("gRPC").WithIncreasedLevel(zap.WarnLevel).ReplaceGRPCLogger()
}
if *gRPCDebug {
logger.ReplaceGRPCLogger(log.WithGroup("gRPC"))
} else {
//TODO(miampf): Find a good way to dynamically increase slog logLevel
logger.ReplaceGRPCLogger(log.WithGroup("gRPC").WithIncreasedLevel(slog.LevelWarn))
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
bindIP := "0.0.0.0"
bindPort := strconv.Itoa(constants.BootstrapperPort)
@ -64,22 +64,25 @@ func main() {
var openDevice vtpm.TPMOpenFunc
var fs afero.Fs
attestVariant, err := variant.FromString(os.Getenv(constants.AttestationVariant))
if err != nil {
log.With(zap.Error(err)).Fatalf("Failed to parse attestation variant")
}
issuer, err := choose.Issuer(attestVariant, log)
if err != nil {
log.With(zap.Error(err)).Fatalf("Failed to select issuer")
}
attestVariant, err := variant.FromString(os.Getenv(constants.AttestationVariant))
if err != nil {
log.With(slog.Any("error", err)).Error("Failed to parse attestation variant")
os.Exit(1)
}
issuer, err := choose.Issuer(attestVariant, log)
if err != nil {
log.With(slog.Any("error", err)).Error("Failed to select issuer")
os.Exit(1)
}
switch cloudprovider.FromString(os.Getenv(constellationCSP)) {
case cloudprovider.AWS:
metadata, err := awscloud.New(ctx)
if err != nil {
log.With(zap.Error(err)).Fatalf("Failed to set up AWS metadata API")
}
metadataAPI = metadata
switch cloudprovider.FromString(os.Getenv(constellationCSP)) {
case cloudprovider.AWS:
metadata, err := awscloud.New(ctx)
if err != nil {
log.With(slog.Any("error", err)).Error("Failed to set up AWS metadata API")
os.Exit(1)
}
metadataAPI = metadata
clusterInitJoiner = kubernetes.New(
"aws", k8sapi.NewKubernetesUtil(), &k8sapi.KubdeadmConfiguration{}, kubectl.NewUninitialized(),
@ -88,12 +91,13 @@ func main() {
openDevice = vtpm.OpenVTPM
fs = afero.NewOsFs()
case cloudprovider.GCP:
metadata, err := gcpcloud.New(ctx)
if err != nil {
log.With(zap.Error(err)).Fatalf("Failed to create GCP metadata client")
}
defer metadata.Close()
case cloudprovider.GCP:
metadata, err := gcpcloud.New(ctx)
if err != nil {
log.With(slog.Any("error", err)).Error("Failed to create GCP metadata client")
os.Exit(1)
}
defer metadata.Close()
metadataAPI = metadata
clusterInitJoiner = kubernetes.New(
@ -113,14 +117,14 @@ func main() {
log.With(zap.Error(err)).Fatalf("Failed to prepare Azure control plane node")
}
metadataAPI = metadata
clusterInitJoiner = kubernetes.New(
"azure", k8sapi.NewKubernetesUtil(), &k8sapi.KubdeadmConfiguration{}, kubectl.NewUninitialized(),
metadata, &kubewaiter.CloudKubeAPIWaiter{},
)
metadataAPI = metadata
clusterInitJoiner = kubernetes.New(
"azure", k8sapi.NewKubernetesUtil(), &k8sapi.KubdeadmConfiguration{}, kubectl.NewUninitialized(),
metadata, &kubewaiter.CloudKubeAPIWaiter{},
)
openDevice = vtpm.OpenVTPM
fs = afero.NewOsFs()
openDevice = vtpm.OpenVTPM
fs = afero.NewOsFs()
case cloudprovider.QEMU:
metadata := qemucloud.New()
@ -162,7 +166,7 @@ func main() {
fs = afero.NewMemMapFs()
}
fileHandler := file.NewHandler(fs)
fileHandler := file.NewHandler(fs)
run(issuer, openDevice, fileHandler, clusterInitJoiner, metadataAPI, bindIP, bindPort, log)
}
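
Regarding the TODO above about dynamically increasing the slog log level: one minimal way to provide a WithIncreasedLevel-style helper on top of slog is a wrapping handler that filters records below a minimum level. This is a sketch under that assumption, not the helper actually referenced in the diff:

package logger

import (
	"context"
	"log/slog"
)

// levelHandler drops all records below minLevel and forwards the rest to the wrapped handler.
type levelHandler struct {
	minLevel slog.Level
	inner    slog.Handler
}

func (h levelHandler) Enabled(_ context.Context, level slog.Level) bool {
	return level >= h.minLevel
}

func (h levelHandler) Handle(ctx context.Context, r slog.Record) error {
	return h.inner.Handle(ctx, r)
}

func (h levelHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
	return levelHandler{minLevel: h.minLevel, inner: h.inner.WithAttrs(attrs)}
}

func (h levelHandler) WithGroup(name string) slog.Handler {
	return levelHandler{minLevel: h.minLevel, inner: h.inner.WithGroup(name)}
}

// WithIncreasedLevel returns a copy of log that only emits records at or above level.
func WithIncreasedLevel(log *slog.Logger, level slog.Level) *slog.Logger {
	return slog.New(levelHandler{minLevel: level, inner: log.Handler()})
}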

View file

@ -7,8 +7,10 @@ SPDX-License-Identifier: AGPL-3.0-only
package main
import (
"context"
"net"
"context"
"log/slog"
"net"
"os"
"github.com/edgelesssys/constellation/v2/bootstrapper/internal/clean"
"github.com/edgelesssys/constellation/v2/bootstrapper/internal/diskencryption"
@ -38,57 +40,61 @@ func run(issuer atls.Issuer, openDevice vtpm.TPMOpenFunc, fileHandler file.Handl
log.Infof("Disk UUID: %s", uuid)
}
nodeBootstrapped, err := initialize.IsNodeBootstrapped(openDevice)
if err != nil {
log.With(zap.Error(err)).Fatalf("Failed to check if node was previously bootstrapped")
}
nodeBootstrapped, err := initialize.IsNodeBootstrapped(openDevice)
if err != nil {
log.With(slog.Any("error", err)).Error("Failed to check if node was previously bootstrapped")
os.Exit(1)
}
if nodeBootstrapped {
if err := kube.StartKubelet(); err != nil {
log.With(zap.Error(err)).Fatalf("Failed to restart kubelet")
}
return
}
if nodeBootstrapped {
if err := kube.StartKubelet(); err != nil {
log.With(slog.Any("error", err)).Error("Failed to restart kubelet")
os.Exit(1)
}
return
}
nodeLock := nodelock.New(openDevice)
initServer, err := initserver.New(context.Background(), nodeLock, kube, issuer, fileHandler, metadata, log)
if err != nil {
log.With(zap.Error(err)).Fatalf("Failed to create init server")
}
nodeLock := nodelock.New(openDevice)
initServer, err := initserver.New(context.Background(), nodeLock, kube, issuer, fileHandler, metadata, log)
if err != nil {
log.With(slog.Any("error", err)).Error("Failed to create init server")
os.Exit(1)
}
dialer := dialer.New(issuer, nil, &net.Dialer{})
joinClient := joinclient.New(nodeLock, dialer, kube, metadata, log)
dialer := dialer.New(issuer, nil, &net.Dialer{})
joinClient := joinclient.New(nodeLock, dialer, kube, metadata, log)
cleaner := clean.New().With(initServer).With(joinClient)
go cleaner.Start()
defer cleaner.Done()
cleaner := clean.New().With(initServer).With(joinClient)
go cleaner.Start()
defer cleaner.Done()
joinClient.Start(cleaner)
joinClient.Start(cleaner)
if err := initServer.Serve(bindIP, bindPort, cleaner); err != nil {
log.With(zap.Error(err)).Fatalf("Failed to serve init server")
}
if err := initServer.Serve(bindIP, bindPort, cleaner); err != nil {
log.With(slog.Any("error", err)).Error("Failed to serve init server")
os.Exit(1)
}
log.Infof("bootstrapper done")
}
func getDiskUUID() (string, error) {
disk := diskencryption.New()
free, err := disk.Open()
if err != nil {
return "", err
}
defer free()
return disk.UUID()
disk := diskencryption.New()
free, err := disk.Open()
if err != nil {
return "", err
}
defer free()
return disk.UUID()
}
type clusterInitJoiner interface {
joinclient.ClusterJoiner
initserver.ClusterInitializer
StartKubelet() error
joinclient.ClusterJoiner
initserver.ClusterInitializer
StartKubelet() error
}
type metadataAPI interface {
joinclient.MetadataAPI
initserver.MetadataAPI
joinclient.MetadataAPI
initserver.MetadataAPI
}

View file

@ -23,6 +23,7 @@ import (
"errors"
"fmt"
"io"
"log/slog"
"net"
"strings"
"sync"
@ -43,7 +44,6 @@ import (
"github.com/edgelesssys/constellation/v2/internal/nodestate"
"github.com/edgelesssys/constellation/v2/internal/role"
"github.com/edgelesssys/constellation/v2/internal/versions/components"
"go.uber.org/zap"
"golang.org/x/crypto/bcrypt"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
@ -68,7 +68,7 @@ type Server struct {
kmsURI string
log *logger.Logger
log *slog.Logger
journaldCollector journaldCollection
@ -76,8 +76,8 @@ type Server struct {
}
// New creates a new initialization server.
func New(ctx context.Context, lock locker, kube ClusterInitializer, issuer atls.Issuer, fh file.Handler, metadata MetadataAPI, log *logger.Logger) (*Server, error) {
log = log.Named("initServer")
func New(ctx context.Context, lock locker, kube ClusterInitializer, issuer atls.Issuer, fh file.Handler, metadata MetadataAPI, log *slog.Logger) (*Server, error) {
log = log.WithGroup("initServer")
initSecretHash, err := metadata.InitSecretHash(ctx)
if err != nil {
@ -106,7 +106,7 @@ func New(ctx context.Context, lock locker, kube ClusterInitializer, issuer atls.
grpcServer := grpc.NewServer(
grpc.Creds(atlscredentials.New(issuer, nil)),
grpc.KeepaliveParams(keepalive.ServerParameters{Time: 15 * time.Second}),
log.Named("gRPC").GetServerUnaryInterceptor(),
logger.GetServerUnaryInterceptor(log.WithGroup("gRPC")),
)
initproto.RegisterAPIServer(grpcServer, server)
@ -122,7 +122,7 @@ func (s *Server) Serve(ip, port string, cleaner cleaner) error {
return fmt.Errorf("failed to listen: %w", err)
}
s.log.Infof("Starting")
s.log.Info("Starting")
return s.grpcServer.Serve(lis)
}
@ -132,8 +132,8 @@ func (s *Server) Init(req *initproto.InitRequest, stream initproto.API_InitServe
s.shutdownLock.RLock()
defer s.shutdownLock.RUnlock()
log := s.log.With(zap.String("peer", grpclog.PeerAddrFromContext(stream.Context())))
log.Infof("Init called")
log := s.log.With(slog.String("peer", grpclog.PeerAddrFromContext(stream.Context())))
log.Info("Init called")
s.kmsURI = req.KmsUri
@ -174,7 +174,7 @@ func (s *Server) Init(req *initproto.InitRequest, stream initproto.API_InitServe
// init does not make sense, so we just stop.
//
// The server stops itself after the current call is done.
log.Warnf("Node is already in a join process")
log.Warn("Node is already in a join process")
err = status.Error(codes.FailedPrecondition, "node is already being activated")
@ -228,7 +228,7 @@ func (s *Server) Init(req *initproto.InitRequest, stream initproto.API_InitServe
return err
}
log.Infof("Init succeeded")
log.Info("Init succeeded")
successMessage := &initproto.InitResponse_InitSuccess{
InitSuccess: &initproto.InitSuccessResponse{
@ -287,14 +287,14 @@ func (s *Server) sendLogsWithMessage(stream initproto.API_InitServer, message er
// Stop stops the initialization server gracefully.
func (s *Server) Stop() {
s.log.Infof("Stopping")
s.log.Info("Stopping")
// Make sure to only stop the server if no Init calls are running
s.shutdownLock.Lock()
defer s.shutdownLock.Unlock()
s.grpcServer.GracefulStop()
s.log.Infof("Stopped")
s.log.Info("Stopped")
}
func (s *Server) setupDisk(ctx context.Context, cloudKms kms.CloudKMS) error {
@ -342,7 +342,7 @@ type ClusterInitializer interface {
kubernetesComponents components.Components,
apiServerCertSANs []string,
serviceCIDR string,
log *logger.Logger,
log *slog.Logger,
) ([]byte, error)
}
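
The grpc.NewServer option above now comes from logger.GetServerUnaryInterceptor(log.WithGroup("gRPC")). A plausible shape for such a helper — the function name appears in the diff, but the body and the fields it logs are assumptions — is a chained unary interceptor that logs each call with the provided slog.Logger:

package logger

import (
	"context"
	"log/slog"

	"google.golang.org/grpc"
)

// getServerUnaryInterceptor returns a grpc.ServerOption that logs every unary RPC
// with the given logger. Hypothetical sketch of what GetServerUnaryInterceptor may wrap.
func getServerUnaryInterceptor(log *slog.Logger) grpc.ServerOption {
	return grpc.ChainUnaryInterceptor(
		func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
			log.Info("gRPC call", slog.String("method", info.FullMethod))
			resp, err := handler(ctx, req)
			if err != nil {
				log.With(slog.Any("error", err)).Error("gRPC call failed", slog.String("method", info.FullMethod))
			}
			return resp, err
		},
	)
}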

View file

@ -16,6 +16,7 @@ import (
"sync"
"testing"
"time"
"log/slog"
"github.com/edgelesssys/constellation/v2/bootstrapper/initproto"
"github.com/edgelesssys/constellation/v2/internal/atls"
@ -66,7 +67,7 @@ func TestNew(t *testing.T) {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
server, err := New(context.TODO(), newFakeLock(), &stubClusterInitializer{}, atls.NewFakeIssuer(variant.Dummy{}), fh, &tc.metadata, logger.NewTest(t))
server, err := New(context.TODO(), newFakeLock(), &stubClusterInitializer{}, atls.NewFakeIssuer(variant.Dummy{}), fh, &tc.metadata, slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)))
if tc.wantErr {
assert.Error(err)
return
@ -214,7 +215,7 @@ func TestInit(t *testing.T) {
initializer: tc.initializer,
disk: tc.disk,
fileHandler: tc.fileHandler,
log: logger.NewTest(t),
log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)),
grpcServer: serveStopper,
cleaner: &fakeCleaner{serveStopper: serveStopper},
initSecretHash: tc.initSecretHash,
@ -408,7 +409,7 @@ type stubClusterInitializer struct {
func (i *stubClusterInitializer) InitCluster(
context.Context, string, string,
bool, components.Components, []string, string, *logger.Logger,
bool, components.Components, []string, string, *slog.Logger,
) ([]byte, error) {
return i.initClusterKubeconfig, i.initClusterErr
}

View file

@ -21,6 +21,7 @@ import (
"context"
"errors"
"fmt"
"log/slog"
"net"
"path/filepath"
"strconv"
@ -39,7 +40,6 @@ import (
"github.com/edgelesssys/constellation/v2/internal/versions/components"
"github.com/edgelesssys/constellation/v2/joinservice/joinproto"
"github.com/spf13/afero"
"go.uber.org/zap"
"google.golang.org/grpc"
kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
kubeconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
@ -73,7 +73,7 @@ type JoinClient struct {
cleaner cleaner
metadataAPI MetadataAPI
log *logger.Logger
log *slog.Logger
mux sync.Mutex
stopC chan struct{}
@ -81,7 +81,7 @@ type JoinClient struct {
}
// New creates a new JoinClient.
func New(lock locker, dial grpcDialer, joiner ClusterJoiner, meta MetadataAPI, log *logger.Logger) *JoinClient {
func New(lock locker, dial grpcDialer, joiner ClusterJoiner, meta MetadataAPI, log *slog.Logger) *JoinClient {
return &JoinClient{
nodeLock: lock,
disk: diskencryption.New(),
@ -93,7 +93,7 @@ func New(lock locker, dial grpcDialer, joiner ClusterJoiner, meta MetadataAPI, l
dialer: dial,
joiner: joiner,
metadataAPI: meta,
log: log.Named("join-client"),
log: log.WithGroup("join-client"),
}
}
@ -110,7 +110,7 @@ func (c *JoinClient) Start(cleaner cleaner) {
return
}
c.log.Infof("Starting")
c.log.Info("Starting")
c.stopC = make(chan struct{}, 1)
c.stopDone = make(chan struct{}, 1)
c.cleaner = cleaner
@ -119,11 +119,11 @@ func (c *JoinClient) Start(cleaner cleaner) {
go func() {
defer ticker.Stop()
defer func() { c.stopDone <- struct{}{} }()
defer c.log.Infof("Client stopped")
defer c.log.Info("Client stopped")
diskUUID, err := c.getDiskUUID()
if err != nil {
c.log.With(zap.Error(err)).Errorf("Failed to get disk UUID")
c.log.With(slog.Any("error", err)).Error("Failed to get disk UUID")
return
}
c.diskUUID = diskUUID
@ -131,12 +131,12 @@ func (c *JoinClient) Start(cleaner cleaner) {
for {
err := c.getNodeMetadata()
if err == nil {
c.log.With(zap.String("role", c.role.String()), zap.String("name", c.nodeName)).Infof("Received own instance metadata")
c.log.With(slog.String("role", c.role.String()), slog.String("name", c.nodeName)).Info("Received own instance metadata")
break
}
c.log.With(zap.Error(err)).Errorf("Failed to retrieve instance metadata")
c.log.With(slog.Any("error", err)).Error("Failed to retrieve instance metadata")
c.log.With(zap.Duration("interval", c.interval)).Infof("Sleeping")
c.log.With(slog.Duration("interval", c.interval)).Info("Sleeping")
select {
case <-c.stopC:
return
@ -147,15 +147,15 @@ func (c *JoinClient) Start(cleaner cleaner) {
for {
err := c.tryJoinWithAvailableServices()
if err == nil {
c.log.Infof("Joined successfully. Client is shutting down")
c.log.Info("Joined successfully. Client is shutting down")
return
} else if isUnrecoverable(err) {
c.log.With(zap.Error(err)).Errorf("Unrecoverable error occurred")
c.log.With(slog.Any("error", err)).Error("Unrecoverable error occurred")
return
}
c.log.With(zap.Error(err)).Warnf("Join failed for all available endpoints")
c.log.With(slog.Any("error", err)).Warn("Join failed for all available endpoints")
c.log.With(zap.Duration("interval", c.interval)).Infof("Sleeping")
c.log.With(slog.Duration("interval", c.interval)).Info("Sleeping")
select {
case <-c.stopC:
return
@ -231,7 +231,7 @@ func (c *JoinClient) join(serviceEndpoint string) error {
conn, err := c.dialer.Dial(ctx, serviceEndpoint)
if err != nil {
c.log.With(zap.String("endpoint", serviceEndpoint), zap.Error(err)).Errorf("Join service unreachable")
c.log.With(slog.String("endpoint", serviceEndpoint), slog.Any("error", err)).Error("Join service unreachable")
return fmt.Errorf("dialing join service endpoint: %w", err)
}
defer conn.Close()
@ -244,7 +244,7 @@ func (c *JoinClient) join(serviceEndpoint string) error {
}
ticket, err := protoClient.IssueJoinTicket(ctx, req)
if err != nil {
c.log.With(zap.String("endpoint", serviceEndpoint), zap.Error(err)).Errorf("Issuing join ticket failed")
c.log.With(slog.String("endpoint", serviceEndpoint), slog.Any("error", err)).Error("Issuing join ticket failed")
return fmt.Errorf("issuing join ticket: %w", err)
}
@ -269,7 +269,7 @@ func (c *JoinClient) startNodeAndJoin(ticket *joinproto.IssueJoinTicketResponse,
nodeLockAcquired, err := c.nodeLock.TryLockOnce(clusterID)
if err != nil {
c.log.With(zap.Error(err)).Errorf("Acquiring node lock failed")
c.log.With(slog.Any("error", err)).Error("Acquiring node lock failed")
return fmt.Errorf("acquiring node lock: %w", err)
}
if !nodeLockAcquired {
@ -322,12 +322,12 @@ func (c *JoinClient) getNodeMetadata() error {
ctx, cancel := c.timeoutCtx()
defer cancel()
c.log.Debugf("Requesting node metadata from metadata API")
c.log.Debug("Requesting node metadata from metadata API")
inst, err := c.metadataAPI.Self(ctx)
if err != nil {
return err
}
c.log.With(zap.Any("instance", inst)).Debugf("Received node metadata")
c.log.With(slog.Any("instance", inst)).Debug("Received node metadata")
if inst.Name == "" {
return errors.New("got instance metadata with empty name")
@ -371,7 +371,7 @@ func (c *JoinClient) getDiskUUID() (string, error) {
func (c *JoinClient) getControlPlaneIPs(ctx context.Context) ([]string, error) {
instances, err := c.metadataAPI.List(ctx)
if err != nil {
c.log.With(zap.Error(err)).Errorf("Failed to list instances from metadata API")
c.log.With(slog.Any("error", err)).Error("Failed to list instances from metadata API")
return nil, fmt.Errorf("listing instances from metadata API: %w", err)
}
@ -382,7 +382,7 @@ func (c *JoinClient) getControlPlaneIPs(ctx context.Context) ([]string, error) {
}
}
c.log.With(zap.Strings("IPs", ips)).Infof("Received control plane endpoints")
c.log.With(slog.Any("IPs", ips)).Info("Received control plane endpoints")
return ips, nil
}
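
The hunks above follow a uniform conversion: zap.Error(err) becomes slog.Any("error", err), typed fields such as zap.String and zap.Duration map to their slog counterparts, and Infof/Warnf/Errorf/Debugf become Info/Warn/Error/Debug (Fatalf becomes Error followed by an explicit os.Exit(1)). A minimal, self-contained illustration of the new pattern using only the standard library:

package main

import (
	"errors"
	"log/slog"
	"os"
	"time"
)

func main() {
	log := slog.New(slog.NewJSONHandler(os.Stderr, nil)).WithGroup("join-client")
	err := errors.New("example failure")

	// old: c.log.With(zap.Error(err)).Errorf("Failed to retrieve instance metadata")
	log.With(slog.Any("error", err)).Error("Failed to retrieve instance metadata")

	// old: c.log.With(zap.Duration("interval", c.interval)).Infof("Sleeping")
	log.With(slog.Duration("interval", 30*time.Second)).Info("Sleeping")

	// old: log.With(zap.Error(err)).Fatalf(...) -- now an Error log plus an explicit exit.
	log.With(slog.Any("error", err)).Error("Unrecoverable error occurred")
	os.Exit(1)
}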

View file

@ -9,6 +9,7 @@ package joinclient
import (
"context"
"errors"
"log/slog"
"net"
"strconv"
"sync"
@ -220,7 +221,7 @@ func TestClient(t *testing.T) {
fileHandler: fileHandler,
metadataAPI: metadataAPI,
clock: clock,
log: logger.NewTest(t),
log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)),
}
serverCreds := atlscredentials.New(nil, nil)
@ -275,7 +276,7 @@ func TestClientConcurrentStartStop(t *testing.T) {
fileHandler: file.NewHandler(afero.NewMemMapFs()),
metadataAPI: &stubRepeaterMetadataAPI{},
clock: testclock.NewFakeClock(time.Now()),
log: logger.NewTest(t),
log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)),
}
wg := sync.WaitGroup{}

View file

@ -13,6 +13,7 @@ import (
"encoding/pem"
"errors"
"fmt"
"log/slog"
"net"
"os"
"os/exec"
@ -30,9 +31,7 @@ import (
"github.com/edgelesssys/constellation/v2/internal/crypto"
"github.com/edgelesssys/constellation/v2/internal/file"
"github.com/edgelesssys/constellation/v2/internal/installer"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/spf13/afero"
"go.uber.org/zap"
)
const (
@ -87,7 +86,7 @@ func (k *KubernetesUtil) InstallComponents(ctx context.Context, kubernetesCompon
// InitCluster instruments kubeadm to initialize the K8s cluster.
// On success an admin kubeconfig file is returned.
func (k *KubernetesUtil) InitCluster(
ctx context.Context, initConfig []byte, nodeName, clusterName string, ips []net.IP, conformanceMode bool, log *logger.Logger,
ctx context.Context, initConfig []byte, nodeName, clusterName string, ips []net.IP, conformanceMode bool, log *slog.Logger,
) ([]byte, error) {
// TODO(3u13r): audit policy should be user input
auditPolicy, err := resources.NewDefaultAuditPolicy().Marshal()
@ -108,7 +107,7 @@ func (k *KubernetesUtil) InitCluster(
}
// preflight
log.Infof("Running kubeadm preflight checks")
log.Info("Running kubeadm preflight checks")
cmd := exec.CommandContext(ctx, constants.KubeadmPath, "init", "phase", "preflight", "-v=5", "--config", initConfigFile.Name())
out, err := cmd.CombinedOutput()
if err != nil {
@ -120,7 +119,7 @@ func (k *KubernetesUtil) InitCluster(
}
// create CA certs
log.Infof("Creating Kubernetes control-plane certificates and keys")
log.Info("Creating Kubernetes control-plane certificates and keys")
cmd = exec.CommandContext(ctx, constants.KubeadmPath, "init", "phase", "certs", "all", "-v=5", "--config", initConfigFile.Name())
out, err = cmd.CombinedOutput()
if err != nil {
@ -132,19 +131,19 @@ func (k *KubernetesUtil) InitCluster(
}
// create kubelet key and CA signed certificate for the node
log.Infof("Creating signed kubelet certificate")
log.Info("Creating signed kubelet certificate")
if err := k.createSignedKubeletCert(nodeName, ips); err != nil {
return nil, fmt.Errorf("creating signed kubelete certificate: %w", err)
}
// Create static pods directory for all nodes (the Kubelets on the worker nodes also expect the path to exist)
log.Infof("Creating static Pod directory /etc/kubernetes/manifests")
log.Info("Creating static Pod directory /etc/kubernetes/manifests")
if err := os.MkdirAll("/etc/kubernetes/manifests", os.ModePerm); err != nil {
return nil, fmt.Errorf("creating static pods directory: %w", err)
}
// initialize the cluster
log.Infof("Initializing the cluster using kubeadm init")
log.Info("Initializing the cluster using kubeadm init")
skipPhases := "--skip-phases=preflight,certs"
if !conformanceMode {
skipPhases += ",addon/kube-proxy"
@ -159,11 +158,11 @@ func (k *KubernetesUtil) InitCluster(
}
return nil, fmt.Errorf("kubeadm init: %w", err)
}
log.With(zap.String("output", string(out))).Infof("kubeadm init succeeded")
log.With(slog.String("output", string(out))).Info("kubeadm init succeeded")
userName := clusterName + "-admin"
log.With(zap.String("userName", userName)).Infof("Creating admin kubeconfig file")
log.With(slog.String("userName", userName)).Info("Creating admin kubeconfig file")
cmd = exec.CommandContext(
ctx, constants.KubeadmPath, "kubeconfig", "user",
"--client-name", userName, "--config", initConfigFile.Name(), "--org", user.SystemPrivilegedGroup,
@ -176,12 +175,12 @@ func (k *KubernetesUtil) InitCluster(
}
return nil, fmt.Errorf("kubeadm kubeconfig user: %w", err)
}
log.Infof("kubeadm kubeconfig user succeeded")
log.Info("kubeadm kubeconfig user succeeded")
return out, nil
}
// JoinCluster joins existing Kubernetes cluster using kubeadm join.
func (k *KubernetesUtil) JoinCluster(ctx context.Context, joinConfig []byte, log *logger.Logger) error {
func (k *KubernetesUtil) JoinCluster(ctx context.Context, joinConfig []byte, log *slog.Logger) error {
// TODO(3u13r): audit policy should be user input
auditPolicy, err := resources.NewDefaultAuditPolicy().Marshal()
if err != nil {
@ -201,7 +200,7 @@ func (k *KubernetesUtil) JoinCluster(ctx context.Context, joinConfig []byte, log
}
// Create static pods directory for all nodes (the Kubelets on the worker nodes also expect the path to exist)
log.Infof("Creating static Pod directory /etc/kubernetes/manifests")
log.Info("Creating static Pod directory /etc/kubernetes/manifests")
if err := os.MkdirAll("/etc/kubernetes/manifests", os.ModePerm); err != nil {
return fmt.Errorf("creating static pods directory: %w", err)
}
@ -216,7 +215,7 @@ func (k *KubernetesUtil) JoinCluster(ctx context.Context, joinConfig []byte, log
}
return fmt.Errorf("kubeadm join: %w", err)
}
log.With(zap.String("output", string(out))).Infof("kubeadm join succeeded")
log.With(slog.String("output", string(out))).Info("kubeadm join succeeded")
return nil
}

View file

@ -8,15 +8,15 @@ package kubernetes
import (
"context"
"log/slog"
"net"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/edgelesssys/constellation/v2/internal/versions/components"
)
type clusterUtil interface {
InstallComponents(ctx context.Context, kubernetesComponents components.Components) error
InitCluster(ctx context.Context, initConfig []byte, nodeName, clusterName string, ips []net.IP, conformanceMode bool, log *logger.Logger) ([]byte, error)
JoinCluster(ctx context.Context, joinConfig []byte, log *logger.Logger) error
InitCluster(ctx context.Context, initConfig []byte, nodeName, clusterName string, ips []net.IP, conformanceMode bool, log *slog.Logger) ([]byte, error)
JoinCluster(ctx context.Context, joinConfig []byte, log *slog.Logger) error
StartKubelet() error
}

View file

@ -10,6 +10,7 @@ package kubernetes
import (
"context"
"fmt"
"log/slog"
"net"
"regexp"
"strings"
@ -20,10 +21,8 @@ import (
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
"github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/kubernetes"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/edgelesssys/constellation/v2/internal/role"
"github.com/edgelesssys/constellation/v2/internal/versions/components"
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
@ -69,9 +68,9 @@ func New(cloudProvider string, clusterUtil clusterUtil, configProvider configura
// InitCluster initializes a new Kubernetes cluster and applies pod network provider.
func (k *KubeWrapper) InitCluster(
ctx context.Context, versionString, clusterName string, conformanceMode bool, kubernetesComponents components.Components, apiServerCertSANs []string, serviceCIDR string, log *logger.Logger,
ctx context.Context, versionString, clusterName string, conformanceMode bool, kubernetesComponents components.Components, apiServerCertSANs []string, serviceCIDR string, log *slog.Logger,
) ([]byte, error) {
log.With(zap.String("version", versionString)).Infof("Installing Kubernetes components")
log.With(slog.String("version", versionString)).Info("Installing Kubernetes components")
if err := k.clusterUtil.InstallComponents(ctx, kubernetesComponents); err != nil {
return nil, err
}
@ -79,7 +78,7 @@ func (k *KubeWrapper) InitCluster(
var validIPs []net.IP
// Step 1: retrieve cloud metadata for Kubernetes configuration
log.Infof("Retrieving node metadata")
log.Info("Retrieving node metadata")
instance, err := k.providerMetadata.Self(ctx)
if err != nil {
return nil, fmt.Errorf("retrieving own instance metadata: %w", err)
@ -108,14 +107,14 @@ func (k *KubeWrapper) InitCluster(
certSANs = append(certSANs, apiServerCertSANs...)
log.With(
zap.String("nodeName", nodeName),
zap.String("providerID", instance.ProviderID),
zap.String("nodeIP", nodeIP),
zap.String("controlPlaneHost", controlPlaneHost),
zap.String("controlPlanePort", controlPlanePort),
zap.String("certSANs", strings.Join(certSANs, ",")),
zap.String("podCIDR", subnetworkPodCIDR),
).Infof("Setting information for node")
slog.String("nodeName", nodeName),
slog.String("providerID", instance.ProviderID),
slog.String("nodeIP", nodeIP),
slog.String("controlPlaneHost", controlPlaneHost),
slog.String("controlPlanePort", controlPlanePort),
slog.String("certSANs", strings.Join(certSANs, ",")),
slog.String("podCIDR", subnetworkPodCIDR),
).Info("Setting information for node")
// Step 2: configure kubeadm init config
ccmSupported := cloudprovider.FromString(k.cloudProvider) == cloudprovider.Azure ||
@ -133,7 +132,7 @@ func (k *KubeWrapper) InitCluster(
if err != nil {
return nil, fmt.Errorf("encoding kubeadm init configuration as YAML: %w", err)
}
log.Infof("Initializing Kubernetes cluster")
log.Info("Initializing Kubernetes cluster")
kubeConfig, err := k.clusterUtil.InitCluster(ctx, initConfigYAML, nodeName, clusterName, validIPs, conformanceMode, log)
if err != nil {
return nil, fmt.Errorf("kubeadm init: %w", err)
@ -186,7 +185,7 @@ func (k *KubeWrapper) InitCluster(
}
// JoinCluster joins existing Kubernetes cluster.
func (k *KubeWrapper) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTokenDiscovery, peerRole role.Role, k8sComponents components.Components, log *logger.Logger) error {
func (k *KubeWrapper) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTokenDiscovery, peerRole role.Role, k8sComponents components.Components, log *slog.Logger) error {
log.With("k8sComponents", k8sComponents).Infof("Installing provided kubernetes components")
if err := k.clusterUtil.InstallComponents(ctx, k8sComponents); err != nil {
return fmt.Errorf("installing kubernetes components: %w", err)
@ -214,12 +213,12 @@ func (k *KubeWrapper) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTo
args.APIServerEndpoint = net.JoinHostPort(loadBalancerHost, loadBalancerPort)
log.With(
zap.String("nodeName", nodeName),
zap.String("providerID", providerID),
zap.String("nodeIP", nodeInternalIP),
zap.String("loadBalancerHost", loadBalancerHost),
zap.String("loadBalancerPort", loadBalancerPort),
).Infof("Setting information for node")
slog.String("nodeName", nodeName),
slog.String("providerID", providerID),
slog.String("nodeIP", nodeInternalIP),
slog.String("loadBalancerHost", loadBalancerHost),
slog.String("loadBalancerPort", loadBalancerPort),
).Info("Setting information for node")
// Step 2: configure kubeadm join config
ccmSupported := cloudprovider.FromString(k.cloudProvider) == cloudprovider.Azure ||
@ -238,7 +237,7 @@ func (k *KubeWrapper) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTo
if err != nil {
return fmt.Errorf("encoding kubeadm join configuration as YAML: %w", err)
}
log.With(zap.String("apiServerEndpoint", args.APIServerEndpoint)).Infof("Joining Kubernetes cluster")
log.With(slog.String("apiServerEndpoint", args.APIServerEndpoint)).Info("Joining Kubernetes cluster")
if err := k.clusterUtil.JoinCluster(ctx, joinConfigYAML, log); err != nil {
return fmt.Errorf("joining cluster: %v; %w ", string(joinConfigYAML), err)
}

View file

@ -9,6 +9,7 @@ package kubernetes
import (
"context"
"errors"
"log/slog"
"net"
"strconv"
"testing"
@ -187,7 +188,7 @@ func TestInitCluster(t *testing.T) {
_, err := kube.InitCluster(
context.Background(), string(tc.k8sVersion), "kubernetes",
false, nil, nil, "", logger.NewTest(t),
false, nil, nil, "", slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)),
)
if tc.wantErr {
@ -359,7 +360,7 @@ func TestJoinCluster(t *testing.T) {
getIPAddr: func() (string, error) { return privateIP, nil },
}
err := kube.JoinCluster(context.Background(), joinCommand, tc.role, tc.k8sComponents, logger.NewTest(t))
err := kube.JoinCluster(context.Background(), joinCommand, tc.role, tc.k8sComponents, slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)))
if tc.wantErr {
assert.Error(err)
return
@ -440,7 +441,7 @@ func (s *stubClusterUtil) InstallComponents(_ context.Context, _ components.Comp
return s.installComponentsErr
}
func (s *stubClusterUtil) InitCluster(_ context.Context, initConfig []byte, _, _ string, _ []net.IP, _ bool, _ *logger.Logger) ([]byte, error) {
func (s *stubClusterUtil) InitCluster(_ context.Context, initConfig []byte, _, _ string, _ []net.IP, _ bool, _ *slog.Logger) ([]byte, error) {
s.initConfigs = append(s.initConfigs, initConfig)
return s.kubeconfig, s.initClusterErr
}
@ -465,7 +466,7 @@ func (s *stubClusterUtil) SetupNodeOperator(_ context.Context, _ k8sapi.Client,
return s.setupNodeOperatorErr
}
func (s *stubClusterUtil) JoinCluster(_ context.Context, joinConfig []byte, _ *logger.Logger) error {
func (s *stubClusterUtil) JoinCluster(_ context.Context, joinConfig []byte, _ *slog.Logger) error {
s.joinConfigs = append(s.joinConfigs, joinConfig)
return s.joinClusterErr
}

View file

@ -212,7 +212,6 @@ func runApply(cmd *cobra.Command, _ []string) error {
if err != nil {
return fmt.Errorf("creating logger: %w", err)
}
defer log.Sync()
spinner, err := newSpinnerOrStderr(cmd)
if err != nil {
return err
@ -396,7 +395,7 @@ func (a *applyCmd) apply(
// Apply Attestation Config
if !a.flags.skipPhases.contains(skipAttestationConfigPhase) {
a.log.Debugf("Applying new attestation config to cluster")
a.log.Debug("Applying new attestation config to cluster")
if err := a.applyJoinConfig(cmd, conf.GetAttestationConfig(), stateFile.ClusterValues.MeasurementSalt); err != nil {
return fmt.Errorf("applying attestation config: %w", err)
}
@ -443,7 +442,7 @@ func (a *applyCmd) apply(
func (a *applyCmd) validateInputs(cmd *cobra.Command, configFetcher attestationconfigapi.Fetcher) (*config.Config, *state.State, error) {
// Read user's config and state file
a.log.Debugf("Reading config from %s", a.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename))
a.log.Debug("Reading config from %s", a.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename))
conf, err := config.New(a.fileHandler, constants.ConfigFilename, configFetcher, a.flags.force)
var configValidationErr *config.ValidationError
if errors.As(err, &configValidationErr) {
@ -453,7 +452,7 @@ func (a *applyCmd) validateInputs(cmd *cobra.Command, configFetcher attestationc
return nil, nil, err
}
a.log.Debugf("Reading state file from %s", a.flags.pathPrefixer.PrefixPrintablePath(constants.StateFilename))
a.log.Debug("Reading state file from %s", a.flags.pathPrefixer.PrefixPrintablePath(constants.StateFilename))
stateFile, err := state.CreateOrRead(a.fileHandler, constants.StateFilename)
if err != nil {
return nil, nil, err
@ -473,7 +472,7 @@ func (a *applyCmd) validateInputs(cmd *cobra.Command, configFetcher attestationc
// in which case the workspace has to be clean
if preCreateValidateErr == nil {
// We can't skip the infrastructure phase if no infrastructure has been defined
a.log.Debugf("State file is in pre-create state, checking workspace")
a.log.Debug("State file is in pre-create state, checking workspace")
if a.flags.skipPhases.contains(skipInfrastructurePhase) {
return nil, nil, preInitValidateErr
}
@ -482,7 +481,7 @@ func (a *applyCmd) validateInputs(cmd *cobra.Command, configFetcher attestationc
return nil, nil, err
}
a.log.Debugf("No Terraform state found in current working directory. Preparing to create a new cluster.")
a.log.Debug("No Terraform state found in current working directory. Preparing to create a new cluster.")
printCreateWarnings(cmd.ErrOrStderr(), conf)
}
@ -491,7 +490,7 @@ func (a *applyCmd) validateInputs(cmd *cobra.Command, configFetcher attestationc
// If so, we need to run the init RPC
if preInitValidateErr == nil || (preCreateValidateErr == nil && !a.flags.skipPhases.contains(skipInitPhase)) {
// We can't skip the init phase if the init RPC hasn't been run yet
a.log.Debugf("State file is in pre-init state, checking workspace")
a.log.Debug("State file is in pre-init state, checking workspace")
if a.flags.skipPhases.contains(skipInitPhase) {
return nil, nil, postInitValidateErr
}
@ -507,7 +506,7 @@ func (a *applyCmd) validateInputs(cmd *cobra.Command, configFetcher attestationc
// If the state file is in a post-init state,
// we need to make sure specific files exist in the workspace
if postInitValidateErr == nil {
a.log.Debugf("State file is in post-init state, checking workspace")
a.log.Debug("State file is in post-init state, checking workspace")
if err := a.checkPostInitFilesExist(); err != nil {
return nil, nil, err
}
@ -522,16 +521,16 @@ func (a *applyCmd) validateInputs(cmd *cobra.Command, configFetcher attestationc
// If we need to run the init RPC, the version has to be valid
// Otherwise, we are able to use an outdated version, meaning we skip the K8s upgrade
// We skip version validation if the user explicitly skips the Kubernetes phase
a.log.Debugf("Validating Kubernetes version %s", conf.KubernetesVersion)
a.log.Debug("Validating Kubernetes version %s", conf.KubernetesVersion)
validVersion, err := versions.NewValidK8sVersion(string(conf.KubernetesVersion), true)
if err != nil {
a.log.Debugf("Kubernetes version not valid: %s", err)
a.log.Debug("Kubernetes version not valid: %s", err)
if !a.flags.skipPhases.contains(skipInitPhase) {
return nil, nil, err
}
if !a.flags.skipPhases.contains(skipK8sPhase) {
a.log.Debugf("Checking if user wants to continue anyway")
a.log.Debug("Checking if user wants to continue anyway")
if !a.flags.yes {
confirmed, err := askToConfirm(cmd,
fmt.Sprintf(
@ -548,7 +547,7 @@ func (a *applyCmd) validateInputs(cmd *cobra.Command, configFetcher attestationc
}
a.flags.skipPhases.add(skipK8sPhase)
a.log.Debugf("Outdated Kubernetes version accepted, Kubernetes upgrade will be skipped")
a.log.Debug("Outdated Kubernetes version accepted, Kubernetes upgrade will be skipped")
}
validVersionString, err := versions.ResolveK8sPatchVersion(xsemver.MajorMinor(string(conf.KubernetesVersion)))
@ -564,7 +563,7 @@ func (a *applyCmd) validateInputs(cmd *cobra.Command, configFetcher attestationc
cmd.PrintErrf("Warning: Constellation with Kubernetes %s is still in preview. Use only for evaluation purposes.\n", validVersion)
}
conf.KubernetesVersion = validVersion
a.log.Debugf("Target Kubernetes version set to %s", conf.KubernetesVersion)
a.log.Debug("Target Kubernetes version set to %s", conf.KubernetesVersion)
// Validate microservice version (helm versions) in the user's config matches the version of the CLI
// This makes sure we catch potential errors early, not just after we already ran Terraform migrations or the init RPC
@ -592,9 +591,9 @@ func (a *applyCmd) applyJoinConfig(cmd *cobra.Command, newConfig config.Attestat
) error {
clusterAttestationConfig, err := a.applier.GetClusterAttestationConfig(cmd.Context(), newConfig.GetVariant())
if err != nil {
a.log.Debugf("Getting cluster attestation config failed: %s", err)
a.log.Debug("Getting cluster attestation config failed: %s", err)
if k8serrors.IsNotFound(err) {
a.log.Debugf("Creating new join config")
a.log.Debug("Creating new join config")
return a.applier.ApplyJoinConfig(cmd.Context(), newConfig, measurementSalt)
}
return fmt.Errorf("getting cluster attestation config: %w", err)
@ -606,7 +605,7 @@ func (a *applyCmd) applyJoinConfig(cmd *cobra.Command, newConfig config.Attestat
return fmt.Errorf("comparing attestation configs: %w", err)
}
if equal {
a.log.Debugf("Current attestation config is equal to the new config, nothing to do")
a.log.Debug("Current attestation config is equal to the new config, nothing to do")
return nil
}
@ -685,7 +684,7 @@ func (a *applyCmd) checkCreateFilesClean() error {
if err := a.checkInitFilesClean(); err != nil {
return err
}
a.log.Debugf("Checking Terraform state")
a.log.Debug("Checking Terraform state")
if _, err := a.fileHandler.Stat(constants.TerraformWorkingDir); err == nil {
return fmt.Errorf(
"terraform state %q already exists in working directory, run 'constellation terminate' before creating a new cluster",
@ -700,7 +699,7 @@ func (a *applyCmd) checkCreateFilesClean() error {
// checkInitFilesClean ensures that the workspace is clean before running the init RPC.
func (a *applyCmd) checkInitFilesClean() error {
a.log.Debugf("Checking admin configuration file")
a.log.Debug("Checking admin configuration file")
if _, err := a.fileHandler.Stat(constants.AdminConfFilename); err == nil {
return fmt.Errorf(
"file %q already exists in working directory, run 'constellation terminate' before creating a new cluster",
@ -709,7 +708,7 @@ func (a *applyCmd) checkInitFilesClean() error {
} else if !errors.Is(err, fs.ErrNotExist) {
return fmt.Errorf("checking for %q: %w", a.flags.pathPrefixer.PrefixPrintablePath(constants.AdminConfFilename), err)
}
a.log.Debugf("Checking master secrets file")
a.log.Debug("Checking master secrets file")
if _, err := a.fileHandler.Stat(constants.MasterSecretFilename); err == nil {
return fmt.Errorf(
"file %q already exists in working directory. Constellation won't overwrite previous master secrets. Move it somewhere or delete it before creating a new cluster",
@ -807,18 +806,18 @@ type warnLogger struct {
// Infof messages are reduced to debug messages, since we don't want
// the extra info when using the CLI without setting the debug flag.
func (wl warnLogger) Infof(fmtStr string, args ...any) {
wl.log.Debugf(fmtStr, args...)
func (wl warnLogger) Info(fmtStr string, args ...any) {
wl.log.Debug(fmtStr, args...)
}
// Warnf prints a formatted warning from the validator.
func (wl warnLogger) Warnf(fmtStr string, args ...any) {
func (wl warnLogger) Warn(fmtStr string, args ...any) {
wl.cmd.PrintErrf("Warning: %s\n", fmt.Sprintf(fmtStr, args...))
}
type warnLog interface {
Warnf(format string, args ...any)
Infof(format string, args ...any)
Warn(format string, args ...any)
Info(format string, args ...any)
}
// applier is used to run the different phases of the apply command.

View file

@ -12,6 +12,7 @@ import (
"errors"
"fmt"
"io"
"log/slog"
"path/filepath"
"strings"
"testing"
@ -196,7 +197,7 @@ func TestBackupHelmCharts(t *testing.T) {
applier: &stubConstellApplier{
stubKubernetesUpgrader: tc.backupClient,
},
log: logger.NewTest(t),
log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)),
}
err := a.backupHelmCharts(context.Background(), tc.helmApplier, tc.includesUpgrades, "")
@ -442,7 +443,7 @@ func TestValidateInputs(t *testing.T) {
cmd.SetIn(bytes.NewBufferString(tc.stdin))
a := applyCmd{
log: logger.NewTest(t),
log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)),
fileHandler: fileHandler,
flags: tc.flags,
}

View file

@ -25,7 +25,7 @@ import (
// runHelmApply handles installing or upgrading helm charts for the cluster.
func (a *applyCmd) runHelmApply(cmd *cobra.Command, conf *config.Config, stateFile *state.State, upgradeDir string,
) error {
a.log.Debugf("Installing or upgrading Helm charts")
a.log.Debug("Installing or upgrading Helm charts")
var masterSecret uri.MasterSecret
if err := a.fileHandler.ReadJSON(constants.MasterSecretFilename, &masterSecret); err != nil {
return fmt.Errorf("reading master secret: %w", err)
@ -44,13 +44,13 @@ func (a *applyCmd) runHelmApply(cmd *cobra.Command, conf *config.Config, stateFi
AllowDestructive: helm.DenyDestructive,
}
a.log.Debugf("Getting service account URI")
a.log.Debug("Getting service account URI")
serviceAccURI, err := cloudcmd.GetMarshaledServiceAccountURI(conf, a.fileHandler)
if err != nil {
return err
}
a.log.Debugf("Preparing Helm charts")
a.log.Debug("Preparing Helm charts")
executor, includesUpgrades, err := a.applier.PrepareHelmCharts(options, stateFile, serviceAccURI, masterSecret, conf.Provider.OpenStack)
if errors.Is(err, helm.ErrConfirmationMissing) {
if !a.flags.yes {
@ -75,12 +75,12 @@ func (a *applyCmd) runHelmApply(cmd *cobra.Command, conf *config.Config, stateFi
cmd.PrintErrln(err)
}
a.log.Debugf("Backing up Helm charts")
a.log.Debug("Backing up Helm charts")
if err := a.backupHelmCharts(cmd.Context(), executor, includesUpgrades, upgradeDir); err != nil {
return err
}
a.log.Debugf("Applying Helm charts")
a.log.Debug("Applying Helm charts")
if !a.flags.skipPhases.contains(skipInitPhase) {
a.spinner.Start("Installing Kubernetes components ", false)
} else {
@ -108,10 +108,10 @@ func (a *applyCmd) backupHelmCharts(
if err := executor.SaveCharts(chartDir, a.fileHandler); err != nil {
return fmt.Errorf("saving Helm charts to disk: %w", err)
}
a.log.Debugf("Helm charts saved to %s", a.flags.pathPrefixer.PrefixPrintablePath(chartDir))
a.log.Debug("Helm charts saved to %s", a.flags.pathPrefixer.PrefixPrintablePath(chartDir))
if includesUpgrades {
a.log.Debugf("Creating backup of CRDs and CRs")
a.log.Debug("Creating backup of CRDs and CRs")
crds, err := a.applier.BackupCRDs(ctx, a.fileHandler, upgradeDir)
if err != nil {
return fmt.Errorf("creating CRD backup: %w", err)

View file

@ -29,13 +29,13 @@ import (
// On success, it writes the Kubernetes admin config file to disk.
// Therefore it is skipped if the Kubernetes admin config file already exists.
func (a *applyCmd) runInit(cmd *cobra.Command, conf *config.Config, stateFile *state.State) (*bytes.Buffer, error) {
a.log.Debugf("Creating aTLS Validator for %s", conf.GetAttestationConfig().GetVariant())
a.log.Debug("Creating aTLS Validator for %s", conf.GetAttestationConfig().GetVariant())
validator, err := choose.Validator(conf.GetAttestationConfig(), a.wLog)
if err != nil {
return nil, fmt.Errorf("creating validator: %w", err)
}
a.log.Debugf("Running init RPC")
a.log.Debug("Running init RPC")
masterSecret, err := a.generateAndPersistMasterSecret(cmd.OutOrStdout())
if err != nil {
return nil, fmt.Errorf("generating master secret: %w", err)
@ -74,9 +74,9 @@ func (a *applyCmd) runInit(cmd *cobra.Command, conf *config.Config, stateFile *s
}
return nil, err
}
a.log.Debugf("Initialization request successful")
a.log.Debug("Initialization request successful")
a.log.Debugf("Buffering init success message")
a.log.Debug("Buffering init success message")
bufferedOutput := &bytes.Buffer{}
if err := a.writeInitOutput(stateFile, resp, a.flags.mergeConfigs, bufferedOutput, measurementSalt); err != nil {
return nil, err
@ -121,7 +121,7 @@ func (a *applyCmd) writeInitOutput(
if err := a.fileHandler.Write(constants.AdminConfFilename, initResp.Kubeconfig, file.OptNone); err != nil {
return fmt.Errorf("writing kubeconfig: %w", err)
}
a.log.Debugf("Kubeconfig written to %s", a.flags.pathPrefixer.PrefixPrintablePath(constants.AdminConfFilename))
a.log.Debug("Kubeconfig written to %s", a.flags.pathPrefixer.PrefixPrintablePath(constants.AdminConfFilename))
if mergeConfig {
if err := a.merger.mergeConfigs(constants.AdminConfFilename, a.fileHandler); err != nil {
@ -136,7 +136,7 @@ func (a *applyCmd) writeInitOutput(
return fmt.Errorf("writing Constellation state file: %w", err)
}
a.log.Debugf("Constellation state file written to %s", a.flags.pathPrefixer.PrefixPrintablePath(constants.StateFilename))
a.log.Debug("Constellation state file written to %s", a.flags.pathPrefixer.PrefixPrintablePath(constants.StateFilename))
if !mergeConfig {
fmt.Fprintln(wr, "You can now connect to your cluster by executing:")

View file

@ -23,7 +23,7 @@ import (
// runTerraformApply checks if changes to Terraform are required and applies them.
func (a *applyCmd) runTerraformApply(cmd *cobra.Command, conf *config.Config, stateFile *state.State, upgradeDir string) error {
a.log.Debugf("Checking if Terraform migrations are required")
a.log.Debug("Checking if Terraform migrations are required")
terraformClient, removeClient, err := a.newInfraApplier(cmd.Context())
if err != nil {
return fmt.Errorf("creating Terraform client: %w", err)
@ -39,18 +39,18 @@ func (a *applyCmd) runTerraformApply(cmd *cobra.Command, conf *config.Config, st
if changesRequired, err := a.planTerraformChanges(cmd, conf, terraformClient); err != nil {
return fmt.Errorf("planning Terraform migrations: %w", err)
} else if !changesRequired {
a.log.Debugf("No changes to infrastructure required, skipping Terraform migrations")
a.log.Debug("No changes to infrastructure required, skipping Terraform migrations")
return nil
}
a.log.Debugf("Apply new Terraform resources for infrastructure changes")
a.log.Debug("Apply new Terraform resources for infrastructure changes")
newInfraState, err := a.applyTerraformChanges(cmd, conf, terraformClient, upgradeDir, isNewCluster)
if err != nil {
return err
}
// Merge the original state with the new infrastructure values
a.log.Debugf("Updating state file with new infrastructure state")
a.log.Debug("Updating state file with new infrastructure state")
if _, err := stateFile.Merge(
// temporary state with new infrastructure values
state.New().SetInfrastructure(newInfraState),
@ -68,7 +68,7 @@ func (a *applyCmd) runTerraformApply(cmd *cobra.Command, conf *config.Config, st
// planTerraformChanges checks if any changes to the Terraform state are required.
// If no state exists, this function will return true and the caller should create a new state.
func (a *applyCmd) planTerraformChanges(cmd *cobra.Command, conf *config.Config, terraformClient cloudApplier) (bool, error) {
a.log.Debugf("Planning Terraform changes")
a.log.Debug("Planning Terraform changes")
// Check if there are any Terraform changes to apply
@ -76,7 +76,7 @@ func (a *applyCmd) planTerraformChanges(cmd *cobra.Command, conf *config.Config,
//
// var manualMigrations []terraform.StateMigration
// for _, migration := range manualMigrations {
// u.log.Debugf("Adding manual Terraform migration: %s", migration.DisplayName)
// u.log.Debug("Adding manual Terraform migration: %s", migration.DisplayName)
// u.infraApplier.AddManualStateMigration(migration)
// }
@ -146,7 +146,7 @@ func (a *applyCmd) applyTerraformChangesWithMessage(
return state.Infrastructure{}, errors.New(abortErrorMsg)
}
}
a.log.Debugf("Applying Terraform changes")
a.log.Debug("Applying Terraform changes")
a.spinner.Start(progressMsg, false)
infraState, err := terraformClient.Apply(cmd.Context(), csp, attestation, rollbackBehavior)
@ -186,7 +186,7 @@ func printCreateInfo(out io.Writer, conf *config.Config, log debugLog) error {
}
}
if len(otherGroupNames) > 0 {
log.Debugf("Creating %d additional node groups: %v", len(otherGroupNames), otherGroupNames)
log.Debug("Creating %d additional node groups: %v", len(otherGroupNames), otherGroupNames)
}
fmt.Fprintf(out, "The following Constellation cluster will be created:\n")

View file

@ -93,7 +93,6 @@ func runConfigFetchMeasurements(cmd *cobra.Command, _ []string) error {
if err != nil {
return fmt.Errorf("creating logger: %w", err)
}
defer log.Sync()
fileHandler := file.NewHandler(afero.NewOsFs())
rekor, err := sigstore.NewRekor()
if err != nil {
@ -105,7 +104,7 @@ func runConfigFetchMeasurements(cmd *cobra.Command, _ []string) error {
if err := cfm.flags.parse(cmd.Flags()); err != nil {
return fmt.Errorf("parsing flags: %w", err)
}
cfm.log.Debugf("Using flags %+v", cfm.flags)
cfm.log.Debug("Using flags %+v", cfm.flags)
fetcher := attestationconfigapi.NewFetcherWithClient(http.DefaultClient, constants.CDNRepositoryURL)
return cfm.configFetchMeasurements(cmd, fileHandler, fetcher)
@ -119,7 +118,7 @@ func (cfm *configFetchMeasurementsCmd) configFetchMeasurements(
return errors.New("fetching measurements is not supported")
}
cfm.log.Debugf("Loading configuration file from %q", cfm.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename))
cfm.log.Debug("Loading configuration file from %q", cfm.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename))
conf, err := config.New(fileHandler, constants.ConfigFilename, fetcher, cfm.flags.force)
var configValidationErr *config.ValidationError
@ -134,11 +133,11 @@ func (cfm *configFetchMeasurementsCmd) configFetchMeasurements(
cmd.PrintErrln("Configured image doesn't look like a released production image. Double check image before deploying to production.")
}
cfm.log.Debugf("Creating context")
cfm.log.Debug("Creating context")
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()
cfm.log.Debugf("Updating URLs")
cfm.log.Debug("Updating URLs")
if err := cfm.flags.updateURLs(conf); err != nil {
return err
}
@ -155,12 +154,12 @@ func (cfm *configFetchMeasurementsCmd) configFetchMeasurements(
}
cfm.log.Debugf("Measurements: %#v\n", fetchedMeasurements)
cfm.log.Debugf("Updating measurements in configuration")
cfm.log.Debug("Updating measurements in configuration")
conf.UpdateMeasurements(fetchedMeasurements)
if err := fileHandler.WriteYAML(constants.ConfigFilename, conf, file.OptOverwrite); err != nil {
return err
}
cfm.log.Debugf("Configuration written to %s", cfm.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename))
cfm.log.Debug("Configuration written to %s", cfm.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename))
cmd.Print("Successfully fetched measurements and updated Configuration\n")
return nil
}

View file

@ -8,6 +8,7 @@ package cmd
import (
"context"
"log/slog"
"net/http"
"net/url"
"testing"
@ -180,7 +181,7 @@ func TestConfigFetchMeasurements(t *testing.T) {
err := fileHandler.WriteYAML(constants.ConfigFilename, gcpConfig, file.OptMkdirAll)
require.NoError(err)
fetcher := stubVerifyFetcher{err: tc.err}
cfm := &configFetchMeasurementsCmd{canFetchMeasurements: true, log: logger.NewTest(t), verifyFetcher: fetcher}
cfm := &configFetchMeasurementsCmd{canFetchMeasurements: true, log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), verifyFetcher: fetcher}
cfm.flags.insecure = tc.insecureFlag
cfm.flags.force = true

View file

@ -8,6 +8,7 @@ package cmd
import (
"fmt"
"log/slog"
"strings"
"testing"
@ -92,7 +93,7 @@ func TestConfigGenerateDefault(t *testing.T) {
cmd := newConfigGenerateCmd()
cg := &configGenerateCmd{
log: logger.NewTest(t),
log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)),
flags: generateFlags{
attestationVariant: variant.Dummy{},
k8sVersion: versions.Default,
@ -144,7 +145,7 @@ func TestConfigGenerateDefaultProviderSpecific(t *testing.T) {
wantConf.RemoveProviderAndAttestationExcept(tc.provider)
cg := &configGenerateCmd{
log: logger.NewTest(t),
log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)),
flags: generateFlags{
attestationVariant: variant.Dummy{},
k8sVersion: versions.Default,
@ -177,7 +178,7 @@ func TestConfigGenerateDefaultExists(t *testing.T) {
cmd := newConfigGenerateCmd()
cg := &configGenerateCmd{
log: logger.NewTest(t),
log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)),
flags: generateFlags{attestationVariant: variant.Dummy{}},
}
require.Error(cg.configGenerate(cmd, fileHandler, cloudprovider.Unknown, ""))

View file

@ -9,6 +9,7 @@ package cmd
import (
"bytes"
"context"
"log/slog"
"testing"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
@ -227,7 +228,7 @@ func TestCreate(t *testing.T) {
skipPhases: newPhases(skipInitPhase, skipAttestationConfigPhase, skipCertSANsPhase, skipHelmPhase, skipImagePhase, skipK8sPhase),
},
log: logger.NewTest(t),
log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)),
spinner: &nopSpinner{},
newInfraApplier: func(_ context.Context) (cloudApplier, func(), error) {
@ -295,7 +296,7 @@ func TestCheckDirClean(t *testing.T) {
for _, f := range tc.existingFiles {
require.NoError(fh.Write(f, []byte{1, 2, 3}, file.OptNone))
}
a := &applyCmd{log: logger.NewTest(t), fileHandler: fh}
a := &applyCmd{log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), fileHandler: fh}
err := a.checkInitFilesClean()
if tc.wantErr {

View file

@ -82,7 +82,6 @@ func runIAMCreate(cmd *cobra.Command, providerCreator providerIAMCreator, provid
if err != nil {
return fmt.Errorf("creating logger: %w", err)
}
defer log.Sync()
iamCreator := &iamCreator{
cmd: cmd,
@ -134,7 +133,7 @@ func (c *iamCreator) create(ctx context.Context) error {
var conf config.Config
if c.flags.updateConfig {
c.log.Debugf("Parsing config %s", c.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename))
c.log.Debug("Parsing config %s", c.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename))
if err := c.fileHandler.ReadYAML(constants.ConfigFilename, &conf); err != nil {
return fmt.Errorf("error reading the configuration file: %w", err)
}
@ -154,7 +153,7 @@ func (c *iamCreator) create(ctx context.Context) error {
return err
}
c.cmd.Println() // Print empty line to separate after spinner ended.
c.log.Debugf("Successfully created the IAM cloud resources")
c.log.Debug("Successfully created the IAM cloud resources")
err = c.providerCreator.parseAndWriteIDFile(iamFile, c.fileHandler)
if err != nil {
@ -162,7 +161,7 @@ func (c *iamCreator) create(ctx context.Context) error {
}
if c.flags.updateConfig {
c.log.Debugf("Writing IAM configuration to %s", c.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename))
c.log.Debug("Writing IAM configuration to %s", c.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename))
c.providerCreator.writeOutputValuesToConfig(&conf, iamFile)
if err := c.fileHandler.WriteYAML(constants.ConfigFilename, conf, file.OptOverwrite); err != nil {
return err

View file

@ -8,6 +8,7 @@ package cmd
import (
"bytes"
"encoding/base64"
"log/slog"
"strings"
"testing"
@ -209,7 +210,7 @@ func TestIAMCreateAWS(t *testing.T) {
iamCreator := &iamCreator{
cmd: cmd,
log: logger.NewTest(t),
log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)),
spinner: &nopSpinner{},
creator: tc.creator,
fileHandler: fileHandler,
@ -385,7 +386,7 @@ func TestIAMCreateAzure(t *testing.T) {
iamCreator := &iamCreator{
cmd: cmd,
log: logger.NewTest(t),
log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)),
spinner: &nopSpinner{},
creator: tc.creator,
fileHandler: fileHandler,
@ -576,7 +577,7 @@ func TestIAMCreateGCP(t *testing.T) {
iamCreator := &iamCreator{
cmd: cmd,
log: logger.NewTest(t),
log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)),
spinner: &nopSpinner{},
creator: tc.creator,
fileHandler: fileHandler,

View file

@ -58,7 +58,6 @@ func runIAMDestroy(cmd *cobra.Command, _ []string) error {
if err != nil {
return fmt.Errorf("creating logger: %w", err)
}
defer log.Sync()
spinner := newSpinner(cmd.ErrOrStderr())
destroyer := cloudcmd.NewIAMDestroyer()
fsHandler := file.NewHandler(afero.NewOsFs())
@ -78,25 +77,25 @@ type destroyCmd struct {
func (c *destroyCmd) iamDestroy(cmd *cobra.Command, spinner spinnerInterf, destroyer iamDestroyer, fsHandler file.Handler) error {
// check if there is a possibility that the cluster is still running by looking out for specific files
c.log.Debugf("Checking if %q exists", c.flags.pathPrefixer.PrefixPrintablePath(constants.AdminConfFilename))
c.log.Debug("Checking if %q exists", c.flags.pathPrefixer.PrefixPrintablePath(constants.AdminConfFilename))
if _, err := fsHandler.Stat(constants.AdminConfFilename); !errors.Is(err, os.ErrNotExist) {
return fmt.Errorf("file %q still exists, please make sure to terminate your cluster before destroying your IAM configuration", c.flags.pathPrefixer.PrefixPrintablePath(constants.AdminConfFilename))
}
c.log.Debugf("Checking if %q exists", c.flags.pathPrefixer.PrefixPrintablePath(constants.StateFilename))
c.log.Debug("Checking if %q exists", c.flags.pathPrefixer.PrefixPrintablePath(constants.StateFilename))
if _, err := fsHandler.Stat(constants.StateFilename); !errors.Is(err, os.ErrNotExist) {
return fmt.Errorf("file %q still exists, please make sure to terminate your cluster before destroying your IAM configuration", c.flags.pathPrefixer.PrefixPrintablePath(constants.StateFilename))
}
gcpFileExists := false
c.log.Debugf("Checking if %q exists", c.flags.pathPrefixer.PrefixPrintablePath(constants.GCPServiceAccountKeyFilename))
c.log.Debug("Checking if %q exists", c.flags.pathPrefixer.PrefixPrintablePath(constants.GCPServiceAccountKeyFilename))
if _, err := fsHandler.Stat(constants.GCPServiceAccountKeyFilename); err != nil {
if !errors.Is(err, os.ErrNotExist) {
return err
}
} else {
c.log.Debugf("%q exists", c.flags.pathPrefixer.PrefixPrintablePath(constants.GCPServiceAccountKeyFilename))
c.log.Debug("%q exists", c.flags.pathPrefixer.PrefixPrintablePath(constants.GCPServiceAccountKeyFilename))
gcpFileExists = true
}
@ -117,7 +116,7 @@ func (c *destroyCmd) iamDestroy(cmd *cobra.Command, spinner spinnerInterf, destr
}
if gcpFileExists {
c.log.Debugf("Starting to delete %q", c.flags.pathPrefixer.PrefixPrintablePath(constants.GCPServiceAccountKeyFilename))
c.log.Debug("Starting to delete %q", c.flags.pathPrefixer.PrefixPrintablePath(constants.GCPServiceAccountKeyFilename))
proceed, err := c.deleteGCPServiceAccountKeyFile(cmd, destroyer, fsHandler)
if err != nil {
return err
@ -128,7 +127,7 @@ func (c *destroyCmd) iamDestroy(cmd *cobra.Command, spinner spinnerInterf, destr
}
}
c.log.Debugf("Starting to destroy IAM configuration")
c.log.Debug("Starting to destroy IAM configuration")
spinner.Start("Destroying IAM configuration", false)
defer spinner.Stop()
@ -144,18 +143,18 @@ func (c *destroyCmd) iamDestroy(cmd *cobra.Command, spinner spinnerInterf, destr
func (c *destroyCmd) deleteGCPServiceAccountKeyFile(cmd *cobra.Command, destroyer iamDestroyer, fsHandler file.Handler) (bool, error) {
var fileSaKey gcpshared.ServiceAccountKey
c.log.Debugf("Parsing %q", c.flags.pathPrefixer.PrefixPrintablePath(constants.GCPServiceAccountKeyFilename))
c.log.Debug("Parsing %q", c.flags.pathPrefixer.PrefixPrintablePath(constants.GCPServiceAccountKeyFilename))
if err := fsHandler.ReadJSON(constants.GCPServiceAccountKeyFilename, &fileSaKey); err != nil {
return false, err
}
c.log.Debugf("Getting service account key from the tfstate")
c.log.Debug("Getting service account key from the tfstate")
tfSaKey, err := destroyer.GetTfStateServiceAccountKey(cmd.Context(), constants.TerraformIAMWorkingDir)
if err != nil {
return false, err
}
c.log.Debugf("Checking if keys are the same")
c.log.Debug("Checking if keys are the same")
if tfSaKey != fileSaKey {
cmd.Printf(
"The key in %q don't match up with your Terraform state. %q will not be deleted.\n",
@ -169,6 +168,6 @@ func (c *destroyCmd) deleteGCPServiceAccountKeyFile(cmd *cobra.Command, destroye
return false, err
}
c.log.Debugf("Successfully deleted %q", c.flags.pathPrefixer.PrefixPrintablePath(constants.GCPServiceAccountKeyFilename))
c.log.Debug("Successfully deleted %q", c.flags.pathPrefixer.PrefixPrintablePath(constants.GCPServiceAccountKeyFilename))
return true, nil
}
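
One caveat about the Debugf-to-Debug renames above: slog's Debug, Info, Warn and Error take a message plus alternating key-value pairs, not a printf format string, so a verb like %q stays in the message as-is and the extra argument is rendered under the synthetic key !BADKEY. A small, self-contained sketch of the difference:

    package main

    import (
        "log/slog"
        "os"
    )

    func main() {
        log := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug}))

        path := "constellation-admin.conf" // example value

        // Format verbs are not interpolated: the %q is printed literally and
        // path is attached as !BADKEY=constellation-admin.conf.
        log.Debug("Checking if %q exists", path)

        // Structured form: message plus key-value pairs.
        log.Debug("Checking if file exists", "path", path)
    }

Where an interpolated message is genuinely wanted, fmt.Sprintf can build it before the call.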

View file

@ -7,6 +7,7 @@ package cmd
import (
"bytes"
"errors"
"log/slog"
"testing"
"github.com/edgelesssys/constellation/v2/internal/cloud/gcpshared"
@ -106,7 +107,7 @@ func TestIAMDestroy(t *testing.T) {
cmd.SetErr(&bytes.Buffer{})
cmd.SetIn(bytes.NewBufferString(tc.stdin))
c := &destroyCmd{log: logger.NewTest(t), flags: iamDestroyFlags{
c := &destroyCmd{log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), flags: iamDestroyFlags{
yes: tc.yesFlag,
}}
@ -196,7 +197,7 @@ func TestDeleteGCPServiceAccountKeyFile(t *testing.T) {
cmd.SetErr(&bytes.Buffer{})
cmd.SetIn(bytes.NewBufferString(tc.stdin))
c := &destroyCmd{log: logger.NewTest(t)}
c := &destroyCmd{log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil))}
proceed, err := c.deleteGCPServiceAccountKeyFile(cmd, tc.destroyer, tc.fsHandler)
if tc.wantErr {

View file

@ -149,7 +149,7 @@ func (i iamUpgradeApplyCmd) iamUpgradeApply(cmd *cobra.Command, iamUpgrader iamU
return errors.New("IAM upgrade aborted by user")
}
}
i.log.Debugf("Applying Terraform IAM migrations")
i.log.Debug("Applying Terraform IAM migrations")
if err := iamUpgrader.ApplyIAMUpgrade(cmd.Context(), conf.GetProvider()); err != nil {
return fmt.Errorf("applying terraform migrations: %w", err)
}

View file

@ -8,6 +8,7 @@ package cmd
import (
"context"
"io"
"log/slog"
"path/filepath"
"strings"
"testing"
@ -131,7 +132,7 @@ func TestIamUpgradeApply(t *testing.T) {
iamUpgradeApplyCmd := &iamUpgradeApplyCmd{
fileHandler: tc.fh,
log: logger.NewTest(t),
log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)),
configFetcher: tc.configFetcher,
flags: iamUpgradeApplyFlags{
yes: tc.yesFlag,

View file

@ -72,7 +72,7 @@ func (c *kubeconfigMerger) mergeConfigs(configPath string, fileHandler file.Hand
clientcmd.RecommendedHomeFile,
configPath, // our config should overwrite the default config
}
c.log.Debugf("Kubeconfig file loading precedence: %v", loadingRules.Precedence)
c.log.Debug("Kubeconfig file loading precedence: %v", loadingRules.Precedence)
// merge the kubeconfigs
cfg, err := loadingRules.Load()
@ -82,7 +82,7 @@ func (c *kubeconfigMerger) mergeConfigs(configPath string, fileHandler file.Hand
// Set the current context to the cluster we just created
cfg.CurrentContext = constellConfig.CurrentContext
c.log.Debugf("Set current context to %s", cfg.CurrentContext)
c.log.Debug("Set current context to %s", cfg.CurrentContext)
json, err := runtime.Encode(clientcodec.Codec, cfg)
if err != nil {
@ -97,7 +97,7 @@ func (c *kubeconfigMerger) mergeConfigs(configPath string, fileHandler file.Hand
if err := fileHandler.Write(clientcmd.RecommendedHomeFile, mergedKubeconfig, file.OptOverwrite); err != nil {
return fmt.Errorf("writing merged kubeconfig to file: %w", err)
}
c.log.Debugf("Merged kubeconfig into default config file: %s", clientcmd.RecommendedHomeFile)
c.log.Debug("Merged kubeconfig into default config file: %s", clientcmd.RecommendedHomeFile)
return nil
}

View file

@ -13,6 +13,7 @@ import (
"strings"
"testing"
"time"
"log/slog"
"github.com/edgelesssys/constellation/v2/bootstrapper/initproto"
"github.com/edgelesssys/constellation/v2/cli/internal/cmd/pathprefix"
@ -228,7 +229,7 @@ func TestInitialize(t *testing.T) {
rootFlags: rootFlags{force: true},
skipPhases: newPhases(skipInfrastructurePhase),
},
log: logger.NewTest(t),
log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)),
spinner: &nopSpinner{},
merger: &stubMerger{},
applier: &stubConstellApplier{
@ -368,8 +369,8 @@ func TestWriteOutput(t *testing.T) {
fileHandler: fileHandler,
spinner: &nopSpinner{},
merger: &stubMerger{},
log: logger.NewTest(t),
applier: constellation.NewApplier(logger.NewTest(t), &nopSpinner{}, constellation.ApplyContextCLI, nil),
log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)),
applier: constellation.NewApplier(slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), &nopSpinner{}, constellation.ApplyContextCLI, nil),
}
err = i.writeInitOutput(stateFile, initOutput, false, &out, measurementSalt)
require.NoError(err)
@ -460,8 +461,8 @@ func TestGenerateMasterSecret(t *testing.T) {
var out bytes.Buffer
i := &applyCmd{
fileHandler: fileHandler,
log: logger.NewTest(t),
applier: constellation.NewApplier(logger.NewTest(t), &nopSpinner{}, constellation.ApplyContextCLI, nil),
log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)),
applier: constellation.NewApplier(slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), &nopSpinner{}, constellation.ApplyContextCLI, nil),
}
secret, err := i.generateAndPersistMasterSecret(&out)

View file

@ -7,25 +7,25 @@ SPDX-License-Identifier: AGPL-3.0-only
package cmd
import (
"github.com/edgelesssys/constellation/v2/internal/logger"
"log/slog"
"os"
"github.com/spf13/cobra"
"go.uber.org/zap/zapcore"
)
type debugLog interface {
Debugf(format string, args ...any)
Sync()
Debug(format string, args ...any)
}
func newCLILogger(cmd *cobra.Command) (debugLog, error) {
logLvl := zapcore.InfoLevel
logLvl := slog.LevelInfo
debugLog, err := cmd.Flags().GetBool("debug")
if err != nil {
return nil, err
}
if debugLog {
logLvl = zapcore.DebugLevel
logLvl = slog.LevelDebug
}
return logger.New(logger.PlainLog, logLvl), nil
return slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: logLvl})), nil
}
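
The handler level above is fixed when the logger is built. If the verbosity ever needs to change after construction, the standard library's slog.LevelVar can back the handler instead; a short sketch:

    package main

    import (
        "log/slog"
        "os"
    )

    func main() {
        // LevelVar implements slog.Leveler and may be changed at runtime.
        lvl := new(slog.LevelVar) // defaults to slog.LevelInfo
        log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: lvl}))

        log.Debug("not printed") // below the current level
        lvl.Set(slog.LevelDebug) // raise verbosity without rebuilding the logger
        log.Debug("printed now")
    }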

View file

@ -47,7 +47,6 @@ func runPatchMAA(cmd *cobra.Command, args []string) error {
if err != nil {
return fmt.Errorf("creating logger: %w", err)
}
defer log.Sync()
p := maa.NewAzurePolicyPatcher()
@ -57,7 +56,7 @@ func runPatchMAA(cmd *cobra.Command, args []string) error {
}
func (c *maaPatchCmd) patchMAA(cmd *cobra.Command, attestationURL string) error {
c.log.Debugf("Using attestation URL %s", attestationURL)
c.log.Debug("Using attestation URL %s", attestationURL)
if err := c.patcher.Patch(cmd.Context(), attestationURL); err != nil {
return fmt.Errorf("patching MAA attestation policy: %w", err)

View file

@ -8,6 +8,7 @@ package cmd
import (
"context"
"log/slog"
"testing"
"github.com/edgelesssys/constellation/v2/internal/logger"
@ -38,7 +39,7 @@ func TestMAAPatch(t *testing.T) {
t.Run(name, func(t *testing.T) {
require := require.New(t)
c := &maaPatchCmd{log: logger.NewTest(t), patcher: tc.patcher}
c := &maaPatchCmd{log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), patcher: tc.patcher}
err := c.patchMAA(&cobra.Command{}, tc.attestationURL)
if tc.wantErr {
require.Error(err)

View file

@ -50,7 +50,6 @@ func runUp(cmd *cobra.Command, _ []string) error {
if err != nil {
return fmt.Errorf("creating logger: %w", err)
}
defer log.Sync()
m := &miniUpCmd{
log: log,
@ -152,7 +151,7 @@ func (m *miniUpCmd) prepareConfig(cmd *cobra.Command) (*config.Config, error) {
if err != nil {
return nil, fmt.Errorf("mini default config is invalid: %v", err)
}
m.log.Debugf("Prepared configuration")
m.log.Debug("Prepared configuration")
return config, m.fileHandler.WriteYAML(constants.ConfigFilename, config, file.OptOverwrite)
}

View file

@ -32,12 +32,12 @@ func (m *miniUpCmd) checkSystemRequirements(out io.Writer) error {
return fmt.Errorf("creation of a QEMU based Constellation is not supported for %s/%s, a linux/amd64 platform is required", runtime.GOOS, runtime.GOARCH)
}
m.log.Debugf("Checked arch and os")
m.log.Debug("Checked arch and os")
// check if /dev/kvm exists
if _, err := os.Stat("/dev/kvm"); err != nil {
return fmt.Errorf("unable to access KVM device: %w", err)
}
m.log.Debugf("Checked that /dev/kvm exists")
m.log.Debug("Checked that /dev/kvm exists")
// check CPU cores
if runtime.NumCPU() < 4 {
return fmt.Errorf("insufficient CPU cores: %d, at least 4 cores are required by MiniConstellation", runtime.NumCPU())
@ -45,7 +45,7 @@ func (m *miniUpCmd) checkSystemRequirements(out io.Writer) error {
if runtime.NumCPU() < 6 {
fmt.Fprintf(out, "WARNING: Only %d CPU cores available. This may cause performance issues.\n", runtime.NumCPU())
}
m.log.Debugf("Checked CPU cores - there are %d", runtime.NumCPU())
m.log.Debug("Checked CPU cores - there are %d", runtime.NumCPU())
// check memory
f, err := os.Open("/proc/meminfo")
@ -63,7 +63,7 @@ func (m *miniUpCmd) checkSystemRequirements(out io.Writer) error {
}
}
}
m.log.Debugf("Scanned for available memory")
m.log.Debug("Scanned for available memory")
memGB := memKB / 1024 / 1024
if memGB < 4 {
return fmt.Errorf("insufficient memory: %dGB, at least 4GB of memory are required by MiniConstellation", memGB)
@ -71,7 +71,7 @@ func (m *miniUpCmd) checkSystemRequirements(out io.Writer) error {
if memGB < 6 {
fmt.Fprintln(out, "WARNING: Less than 6GB of memory available. This may cause performance issues.")
}
m.log.Debugf("Checked available memory, you have %dGB available", memGB)
m.log.Debug("Checked available memory, you have %dGB available", memGB)
var stat unix.Statfs_t
if err := unix.Statfs(".", &stat); err != nil {
@ -81,7 +81,7 @@ func (m *miniUpCmd) checkSystemRequirements(out io.Writer) error {
if freeSpaceGB < 20 {
return fmt.Errorf("insufficient disk space: %dGB, at least 20GB of disk space are required by MiniConstellation", freeSpaceGB)
}
m.log.Debugf("Checked for free space available, you have %dGB available", freeSpaceGB)
m.log.Debug("Checked for free space available, you have %dGB available", freeSpaceGB)
return nil
}

View file

@ -76,7 +76,6 @@ func runRecover(cmd *cobra.Command, _ []string) error {
if err != nil {
return fmt.Errorf("creating logger: %w", err)
}
defer log.Sync()
fileHandler := file.NewHandler(afero.NewOsFs())
newDialer := func(validator atls.Validator) *dialer.Dialer {
return dialer.New(nil, validator, &net.Dialer{})
@ -85,7 +84,7 @@ func runRecover(cmd *cobra.Command, _ []string) error {
if err := r.flags.parse(cmd.Flags()); err != nil {
return err
}
r.log.Debugf("Using flags: %+v", r.flags)
r.log.Debug("Using flags: %+v", r.flags)
return r.recover(cmd, fileHandler, 5*time.Second, &recoverDoer{log: r.log}, newDialer)
}
@ -94,12 +93,12 @@ func (r *recoverCmd) recover(
doer recoverDoerInterface, newDialer func(validator atls.Validator) *dialer.Dialer,
) error {
var masterSecret uri.MasterSecret
r.log.Debugf("Loading master secret file from %s", r.flags.pathPrefixer.PrefixPrintablePath(constants.MasterSecretFilename))
r.log.Debug("Loading master secret file from %s", r.flags.pathPrefixer.PrefixPrintablePath(constants.MasterSecretFilename))
if err := fileHandler.ReadJSON(constants.MasterSecretFilename, &masterSecret); err != nil {
return err
}
r.log.Debugf("Loading configuration file from %q", r.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename))
r.log.Debug("Loading configuration file from %q", r.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename))
conf, err := config.New(fileHandler, constants.ConfigFilename, r.configFetcher, r.flags.force)
var configValidationErr *config.ValidationError
if errors.As(err, &configValidationErr) {
@ -130,16 +129,16 @@ func (r *recoverCmd) recover(
conf.UpdateMAAURL(stateFile.Infrastructure.Azure.AttestationURL)
}
r.log.Debugf("Creating aTLS Validator for %s", conf.GetAttestationConfig().GetVariant())
r.log.Debug("Creating aTLS Validator for %s", conf.GetAttestationConfig().GetVariant())
validator, err := choose.Validator(conf.GetAttestationConfig(), warnLogger{cmd: cmd, log: r.log})
if err != nil {
return fmt.Errorf("creating new validator: %w", err)
}
r.log.Debugf("Created a new validator")
r.log.Debug("Created a new validator")
doer.setDialer(newDialer(validator), endpoint)
r.log.Debugf("Set dialer for endpoint %s", endpoint)
r.log.Debug("Set dialer for endpoint %s", endpoint)
doer.setURIs(masterSecret.EncodeToURI(), uri.NoStoreURI)
r.log.Debugf("Set secrets")
r.log.Debug("Set secrets")
if err := r.recoverCall(cmd.Context(), cmd.OutOrStdout(), interval, doer); err != nil {
if grpcRetry.ServiceIsUnavailable(err) {
return nil
@ -167,12 +166,12 @@ func (r *recoverCmd) recoverCall(ctx context.Context, out io.Writer, interval ti
})
}
r.log.Debugf("Encountered error (retriable: %t): %s", retry, err)
r.log.Debug("Encountered error (retriable: %t): %s", retry, err)
return retry
}
retrier := retry.NewIntervalRetrier(doer, interval, retryOnceOnFailure)
r.log.Debugf("Created new interval retrier")
r.log.Debug("Created new interval retrier")
err = retrier.Do(ctx)
if err != nil {
break
@ -180,7 +179,7 @@ func (r *recoverCmd) recoverCall(ctx context.Context, out io.Writer, interval ti
fmt.Fprintln(out, "Pushed recovery key.")
ctr++
}
r.log.Debugf("Retry counter is %d", ctr)
r.log.Debug("Retry counter is %d", ctr)
if ctr > 0 {
fmt.Fprintf(out, "Recovered %d control-plane nodes.\n", ctr)
} else if grpcRetry.ServiceIsUnavailable(err) {
@ -222,11 +221,11 @@ func (d *recoverDoer) Do(ctx context.Context) (retErr error) {
if err != nil {
return fmt.Errorf("dialing recovery server: %w", err)
}
d.log.Debugf("Dialed recovery server")
d.log.Debug("Dialed recovery server")
defer conn.Close()
protoClient := recoverproto.NewAPIClient(conn)
d.log.Debugf("Created protoClient")
d.log.Debug("Created protoClient")
req := &recoverproto.RecoverMessage{
KmsUri: d.kmsURI,
@ -238,7 +237,7 @@ func (d *recoverDoer) Do(ctx context.Context) (retErr error) {
return fmt.Errorf("calling recover: %w", err)
}
d.log.Debugf("Received confirmation")
d.log.Debug("Received confirmation")
return nil
}

View file

@ -10,6 +10,7 @@ import (
"bytes"
"context"
"errors"
"log/slog"
"net"
"strconv"
"testing"
@ -164,7 +165,7 @@ func TestRecover(t *testing.T) {
newDialer := func(atls.Validator) *dialer.Dialer { return nil }
r := &recoverCmd{
log: logger.NewTest(t),
log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)),
configFetcher: stubAttestationFetcher{},
flags: recoverFlags{
rootFlags: rootFlags{force: true},
@ -218,7 +219,7 @@ func TestDoRecovery(t *testing.T) {
go recoverServer.Serve(listener)
defer recoverServer.GracefulStop()
r := &recoverCmd{log: logger.NewTest(t)}
r := &recoverCmd{log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil))}
recoverDoer := &recoverDoer{
dialer: dialer.New(nil, nil, netDialer),
endpoint: addr,

View file

@ -43,7 +43,6 @@ func runStatus(cmd *cobra.Command, _ []string) error {
if err != nil {
return fmt.Errorf("creating logger: %w", err)
}
defer log.Sync()
fileHandler := file.NewHandler(afero.NewOsFs())

View file

@ -9,6 +9,7 @@ package cmd
import (
"bytes"
"errors"
"log/slog"
"testing"
"github.com/edgelesssys/constellation/v2/internal/constants"
@ -139,7 +140,7 @@ func TestTerminate(t *testing.T) {
fileHandler := file.NewHandler(tc.setupFs(require, tc.stateFile))
tCmd := &terminateCmd{
log: logger.NewTest(t),
log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)),
fileHandler: fileHandler,
flags: terminateFlags{
yes: tc.yesFlag,

View file

@ -9,6 +9,7 @@ package cmd
import (
"bytes"
"context"
"log/slog"
"testing"
"github.com/edgelesssys/constellation/v2/cli/internal/cloudcmd"
@ -251,7 +252,7 @@ func TestUpgradeApply(t *testing.T) {
upgrader := &applyCmd{
fileHandler: fh,
flags: tc.flags,
log: logger.NewTest(t),
log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)),
spinner: &nopSpinner{},
merger: &stubMerger{},
newInfraApplier: func(ctx context.Context) (cloudApplier, func(), error) {

View file

@ -92,7 +92,6 @@ func runUpgradeCheck(cmd *cobra.Command, _ []string) error {
if err != nil {
return fmt.Errorf("creating logger: %w", err)
}
defer log.Sync()
var flags upgradeCheckFlags
if err := flags.parse(cmd.Flags()); err != nil {
@ -188,7 +187,7 @@ func (u *upgradeCheckCmd) upgradeCheck(cmd *cobra.Command, fetcher attestationco
// get current image version of the cluster
csp := conf.GetProvider()
attestationVariant := conf.GetAttestationConfig().GetVariant()
u.log.Debugf("Using provider %s with attestation variant %s", csp.String(), attestationVariant.String())
u.log.Debug("Using provider %s with attestation variant %s", csp.String(), attestationVariant.String())
current, err := u.collect.currentVersions(cmd.Context())
if err != nil {
@ -199,18 +198,18 @@ func (u *upgradeCheckCmd) upgradeCheck(cmd *cobra.Command, fetcher attestationco
if err != nil {
return err
}
u.log.Debugf("Current cli version: %s", current.cli)
u.log.Debugf("Supported cli version(s): %s", supported.cli)
u.log.Debugf("Current service version: %s", current.service)
u.log.Debugf("Supported service version: %s", supported.service)
u.log.Debugf("Current k8s version: %s", current.k8s)
u.log.Debugf("Supported k8s version(s): %s", supported.k8s)
u.log.Debug("Current cli version: %s", current.cli)
u.log.Debug("Supported cli version(s): %s", supported.cli)
u.log.Debug("Current service version: %s", current.service)
u.log.Debug("Supported service version: %s", supported.service)
u.log.Debug("Current k8s version: %s", current.k8s)
u.log.Debug("Supported k8s version(s): %s", supported.k8s)
// Filter versions to only include upgrades
newServices := supported.service
if err := supported.service.IsUpgradeTo(current.service); err != nil {
newServices = consemver.Semver{}
u.log.Debugf("No valid service upgrades are available from %q to %q. The minor version can only drift by 1.\n", current.service.String(), supported.service.String())
u.log.Debug("No valid service upgrades are available from %q to %q. The minor version can only drift by 1.\n", current.service.String(), supported.service.String())
}
newKubernetes := filterK8sUpgrades(current.k8s, supported.k8s)
@ -222,13 +221,13 @@ func (u *upgradeCheckCmd) upgradeCheck(cmd *cobra.Command, fetcher attestationco
return err
}
u.log.Debugf("Planning Terraform migrations")
u.log.Debug("Planning Terraform migrations")
// Add manual migrations here if required
//
// var manualMigrations []terraform.StateMigration
// for _, migration := range manualMigrations {
// u.log.Debugf("Adding manual Terraform migration: %s", migration.DisplayName)
// u.log.Debug("Adding manual Terraform migration: %s", migration.DisplayName)
// u.terraformChecker.AddManualStateMigration(migration)
// }
cmd.Println("The following Terraform migrations are available with this CLI:")
@ -344,7 +343,7 @@ func (v *versionCollector) newMeasurements(ctx context.Context, csp cloudprovide
// get expected measurements for each image
upgrades := make(map[string]measurements.M)
for _, version := range versions {
v.log.Debugf("Fetching measurements for image: %s", version)
v.log.Debug("Fetching measurements for image: %s", version)
shortPath := version.ShortPath()
publicKey, err := keyselect.CosignPublicKeyForVersion(version)
@ -365,7 +364,7 @@ func (v *versionCollector) newMeasurements(ctx context.Context, csp cloudprovide
}
upgrades[shortPath] = measurements
}
v.log.Debugf("Compatible image measurements are %v", upgrades)
v.log.Debug("Compatible image measurements are %v", upgrades)
return upgrades, nil
}
@ -453,9 +452,9 @@ func (v *versionCollector) newImages(ctx context.Context, currentImageVersion co
if err != nil {
return nil, fmt.Errorf("calculating next image minor version: %w", err)
}
v.log.Debugf("Current image minor version is %s", currentImageMinorVer)
v.log.Debugf("Current CLI minor version is %s", currentCLIMinorVer)
v.log.Debugf("Next image minor version is %s", nextImageMinorVer)
v.log.Debug("Current image minor version is %s", currentImageMinorVer)
v.log.Debug("Current CLI minor version is %s", currentCLIMinorVer)
v.log.Debug("Next image minor version is %s", nextImageMinorVer)
allowedMinorVersions := []string{currentImageMinorVer, nextImageMinorVer}
switch cliImageCompare := semver.Compare(currentCLIMinorVer, currentImageMinorVer); {
@ -471,7 +470,7 @@ func (v *versionCollector) newImages(ctx context.Context, currentImageVersion co
case cliImageCompare > 0:
allowedMinorVersions = []string{currentImageMinorVer, nextImageMinorVer}
}
v.log.Debugf("Allowed minor versions are %#v", allowedMinorVersions)
v.log.Debug("Allowed minor versions are %#v", allowedMinorVersions)
newerImages, err := v.newerVersions(ctx, allowedMinorVersions)
if err != nil {
@ -494,7 +493,7 @@ func (v *versionCollector) newerVersions(ctx context.Context, allowedVersions []
patchList, err := v.verListFetcher.FetchVersionList(ctx, patchList)
var notFound *fetcher.NotFoundError
if errors.As(err, &notFound) {
v.log.Debugf("Skipping version: %s", err)
v.log.Debug("Skipping version: %s", err)
continue
}
if err != nil {
@ -502,7 +501,7 @@ func (v *versionCollector) newerVersions(ctx context.Context, allowedVersions []
}
updateCandidates = append(updateCandidates, patchList.StructuredVersions()...)
}
v.log.Debugf("Update candidates are %v", updateCandidates)
v.log.Debug("Update candidates are %v", updateCandidates)
return updateCandidates, nil
}
@ -604,7 +603,7 @@ func getCompatibleImageMeasurements(ctx context.Context, writer io.Writer, clien
}
var fetchedMeasurements measurements.M
log.Debugf("Fetching for measurement url: %s", measurementsURL)
log.Debug("Fetching for measurement url: %s", measurementsURL)
hash, err := fetchedMeasurements.FetchAndVerify(
ctx, client, cosign,
@ -658,7 +657,7 @@ func (v *versionCollector) newCLIVersions(ctx context.Context) ([]consemver.Semv
return nil, fmt.Errorf("parsing version %s: %w", version, err)
}
if err := target.IsUpgradeTo(v.cliVersion); err != nil {
v.log.Debugf("Skipping incompatible minor version %q: %s", version, err)
v.log.Debug("Skipping incompatible minor version %q: %s", version, err)
continue
}
list := versionsapi.List{
@ -692,7 +691,7 @@ func (v *versionCollector) filterCompatibleCLIVersions(ctx context.Context, cliP
var compatibleVersions []consemver.Semver
for _, version := range cliPatchVersions {
if err := version.IsUpgradeTo(v.cliVersion); err != nil {
v.log.Debugf("Skipping incompatible patch version %q: %s", version, err)
v.log.Debug("Skipping incompatible patch version %q: %s", version, err)
continue
}
req := versionsapi.CLIInfo{

View file

@ -11,6 +11,7 @@ import (
"context"
"errors"
"io"
"log/slog"
"net/http"
"strings"
"testing"
@ -139,7 +140,7 @@ func TestGetCompatibleImageMeasurements(t *testing.T) {
}
})
upgrades, err := getCompatibleImageMeasurements(context.Background(), &bytes.Buffer{}, client, &stubCosignVerifier{}, singleUUIDVerifier(), csp, attestationVariant, versionZero, logger.NewTest(t))
upgrades, err := getCompatibleImageMeasurements(context.Background(), &bytes.Buffer{}, client, &stubCosignVerifier{}, singleUUIDVerifier(), csp, attestationVariant, versionZero, slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)))
assert.NoError(err)
for _, measurement := range upgrades {
@ -215,7 +216,7 @@ func TestUpgradeCheck(t *testing.T) {
collect: &tc.collector,
terraformChecker: tc.checker,
fileHandler: fileHandler,
log: logger.NewTest(t),
log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)),
}
cmd := newUpgradeCheckCmd()

View file

@ -100,7 +100,6 @@ func runVerify(cmd *cobra.Command, _ []string) error {
if err != nil {
return fmt.Errorf("creating logger: %w", err)
}
defer log.Sync()
fileHandler := file.NewHandler(afero.NewOsFs())
verifyClient := &constellationVerifier{
@ -129,7 +128,7 @@ func runVerify(cmd *cobra.Command, _ []string) error {
if err := v.flags.parse(cmd.Flags()); err != nil {
return err
}
v.log.Debugf("Using flags: %+v", v.flags)
v.log.Debug("Using flags: %+v", v.flags)
fetcher := attestationconfigapi.NewFetcher()
return v.verify(cmd, verifyClient, formatterFactory, fetcher)
}
@ -137,7 +136,7 @@ func runVerify(cmd *cobra.Command, _ []string) error {
type formatterFactory func(output string, attestation variant.Variant, log debugLog) (attestationDocFormatter, error)
func (c *verifyCmd) verify(cmd *cobra.Command, verifyClient verifyClient, factory formatterFactory, configFetcher attestationconfigapi.Fetcher) error {
c.log.Debugf("Loading configuration file from %q", c.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename))
c.log.Debug("Loading configuration file from %q", c.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename))
conf, err := config.New(c.fileHandler, constants.ConfigFilename, configFetcher, c.flags.force)
var configValidationErr *config.ValidationError
if errors.As(err, &configValidationErr) {
@ -170,13 +169,13 @@ func (c *verifyCmd) verify(cmd *cobra.Command, verifyClient verifyClient, factor
}
conf.UpdateMAAURL(maaURL)
c.log.Debugf("Updating expected PCRs")
c.log.Debug("Updating expected PCRs")
attConfig := conf.GetAttestationConfig()
if err := updateInitMeasurements(attConfig, ownerID, clusterID); err != nil {
return fmt.Errorf("updating expected PCRs: %w", err)
}
c.log.Debugf("Creating aTLS Validator for %s", conf.GetAttestationConfig().GetVariant())
c.log.Debug("Creating aTLS Validator for %s", conf.GetAttestationConfig().GetVariant())
validator, err := choose.Validator(attConfig, warnLogger{cmd: cmd, log: c.log})
if err != nil {
return fmt.Errorf("creating aTLS validator: %w", err)
@ -186,7 +185,7 @@ func (c *verifyCmd) verify(cmd *cobra.Command, verifyClient verifyClient, factor
if err != nil {
return fmt.Errorf("generating random nonce: %w", err)
}
c.log.Debugf("Generated random nonce: %x", nonce)
c.log.Debug("Generated random nonce: %x", nonce)
rawAttestationDoc, err := verifyClient.Verify(
cmd.Context(),
@ -385,7 +384,7 @@ type constellationVerifier struct {
func (v *constellationVerifier) Verify(
ctx context.Context, endpoint string, req *verifyproto.GetAttestationRequest, validator atls.Validator,
) (string, error) {
v.log.Debugf("Dialing endpoint: %q", endpoint)
v.log.Debug("Dialing endpoint: %q", endpoint)
conn, err := v.dialer.DialInsecure(ctx, endpoint)
if err != nil {
return "", fmt.Errorf("dialing init server: %w", err)
@ -394,13 +393,13 @@ func (v *constellationVerifier) Verify(
client := verifyproto.NewAPIClient(conn)
v.log.Debugf("Sending attestation request")
v.log.Debug("Sending attestation request")
resp, err := client.GetAttestation(ctx, req)
if err != nil {
return "", fmt.Errorf("getting attestation: %w", err)
}
v.log.Debugf("Verifying attestation")
v.log.Debug("Verifying attestation")
signedData, err := validator.Validate(ctx, resp.Attestation, req.Nonce)
if err != nil {
return "", fmt.Errorf("validating attestation: %w", err)

View file

@ -18,6 +18,7 @@ import (
"strconv"
"strings"
"testing"
"log/slog"
"github.com/edgelesssys/constellation/v2/internal/atls"
"github.com/edgelesssys/constellation/v2/internal/attestation/measurements"
@ -210,7 +211,7 @@ func TestVerify(t *testing.T) {
v := &verifyCmd{
fileHandler: fileHandler,
log: logger.NewTest(t),
log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)),
flags: verifyFlags{
clusterID: tc.clusterIDFlag,
endpoint: tc.nodeEndpointFlag,
@ -242,7 +243,7 @@ func (f *stubAttDocFormatter) format(_ context.Context, _ string, _ bool, _ conf
func TestFormat(t *testing.T) {
formatter := func() *defaultAttestationDocFormatter {
return &defaultAttestationDocFormatter{
log: logger.NewTest(t),
log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)),
}
}
@ -333,7 +334,7 @@ func TestVerifyClient(t *testing.T) {
go verifyServer.Serve(listener)
defer verifyServer.GracefulStop()
verifier := &constellationVerifier{dialer: dialer, log: logger.NewTest(t)}
verifier := &constellationVerifier{dialer: dialer, log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil))}
request := &verifyproto.GetAttestationRequest{
Nonce: tc.nonce,
}

View file

@ -10,12 +10,12 @@ import (
"context"
"flag"
"fmt"
"log/slog"
"net"
"os"
"sync"
"github.com/spf13/afero"
"go.uber.org/zap"
"github.com/edgelesssys/constellation/v2/debugd/internal/debugd/deploy"
"github.com/edgelesssys/constellation/v2/debugd/internal/debugd/info"
@ -46,11 +46,11 @@ func main() {
verbosity := flag.Int("v", 0, logger.CmdLineVerbosityDescription)
flag.Parse()
log := logger.New(logger.JSONLog, logger.VerbosityFromInt(*verbosity))
log := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: logger.VerbosityFromInt(*verbosity)}))
fs := afero.NewOsFs()
streamer := streamer.New(fs)
filetransferer := filetransfer.New(log.Named("filetransfer"), streamer, filetransfer.DontShowProgress)
serviceManager := deploy.NewServiceManager(log.Named("serviceManager"))
filetransferer := filetransfer.New(log.WithGroup("filetransfer"), streamer, filetransfer.DontShowProgress)
serviceManager := deploy.NewServiceManager(log.WithGroup("serviceManager"))
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@ -64,21 +64,24 @@ func main() {
case platform.AWS:
meta, err := awscloud.New(ctx)
if err != nil {
log.With(zap.Error(err)).Fatalf("Failed to initialize AWS metadata")
log.With(slog.Any("error", err)).Error("Failed to initialize AWS metadata")
os.Exit(1)
}
fetcher = cloudprovider.New(meta)
case platform.Azure:
meta, err := azurecloud.New(ctx)
if err != nil {
log.With(zap.Error(err)).Fatalf("Failed to initialize Azure metadata")
log.With(slog.Any("error", err)).Error("Failed to initialize Azure metadata")
os.Exit(1)
}
fetcher = cloudprovider.New(meta)
case platform.GCP:
meta, err := gcpcloud.New(ctx)
if err != nil {
log.With(zap.Error(err)).Fatalf("Failed to initialize GCP metadata")
log.With(slog.Any("error", err)).Error("Failed to initialize GCP metadata")
os.Exit(1)
}
defer meta.Close()
fetcher = cloudprovider.New(meta)
@ -86,26 +89,27 @@ func main() {
case platform.OpenStack:
meta, err := openstackcloud.New(ctx)
if err != nil {
log.With(zap.Error(err)).Fatalf("Failed to initialize OpenStack metadata")
log.With(slog.Any("error", err)).Error("Failed to initialize OpenStack metadata")
os.Exit(1)
}
fetcher = cloudprovider.New(meta)
case platform.QEMU:
fetcher = cloudprovider.New(qemucloud.New())
default:
log.Errorf("Unknown / unimplemented cloud provider CONSTEL_CSP=%v. Using fallback", csp)
log.Error("Unknown / unimplemented cloud provider CONSTEL_CSP=%v. Using fallback", csp)
fetcher = fallback.NewFallbackFetcher()
}
infoMap := info.NewMap()
infoMap.RegisterOnReceiveTrigger(
logcollector.NewStartTrigger(ctx, wg, platform.FromString(csp), fetcher, log.Named("logcollector")),
logcollector.NewStartTrigger(ctx, wg, platform.FromString(csp), fetcher, log.WithGroup("logcollector")),
)
download := deploy.New(log.Named("download"), &net.Dialer{}, serviceManager, filetransferer, infoMap)
download := deploy.New(log.WithGroup("download"), &net.Dialer{}, serviceManager, filetransferer, infoMap)
sched := metadata.NewScheduler(log.Named("scheduler"), fetcher, download)
serv := server.New(log.Named("server"), serviceManager, filetransferer, infoMap)
sched := metadata.NewScheduler(log.WithGroup("scheduler"), fetcher, download)
serv := server.New(log.WithGroup("server"), serviceManager, filetransferer, infoMap)
writeDebugBanner(log)
@ -114,14 +118,14 @@ func main() {
wg.Wait()
}
func writeDebugBanner(log *logger.Logger) {
func writeDebugBanner(log *slog.Logger) {
tty, err := os.OpenFile("/dev/ttyS0", os.O_WRONLY, os.ModeAppend)
if err != nil {
log.With(zap.Error(err)).Errorf("Unable to open /dev/ttyS0 for printing banner")
log.With(slog.Any("error", err)).Error("Unable to open /dev/ttyS0 for printing banner")
return
}
defer tty.Close()
if _, err := fmt.Fprint(tty, debugBanner); err != nil {
log.With(zap.Error(err)).Errorf("Unable to print to /dev/ttyS0")
log.With(slog.Any("error", err)).Error("Unable to print to /dev/ttyS0")
}
}
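
Since slog has no Fatal level, every fatal path above becomes an Error call followed by an explicit os.Exit(1). If that pairing gets repetitive, a tiny helper (a sketch, not something this commit introduces) keeps the call sites to one line:

    package main

    import (
        "errors"
        "log/slog"
        "os"
    )

    // fatal logs msg with the error attached and terminates the process,
    // mirroring the Error-then-os.Exit(1) pattern used above.
    func fatal(log *slog.Logger, err error, msg string) {
        log.With(slog.Any("error", err)).Error(msg)
        os.Exit(1)
    }

    func main() {
        log := slog.New(slog.NewJSONHandler(os.Stdout, nil))
        if err := errors.New("example failure"); err != nil {
            fatal(log, err, "Failed to initialize metadata")
        }
    }

As with the inlined pattern, os.Exit skips deferred functions, which matches the previous Fatalf behavior.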

View file

@ -11,21 +11,20 @@ import (
"errors"
"fmt"
"io"
"log/slog"
"net"
"strconv"
"github.com/edgelesssys/constellation/v2/debugd/internal/filetransfer"
pb "github.com/edgelesssys/constellation/v2/debugd/service"
"github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/logger"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
)
// Download downloads a bootstrapper from a given debugd instance.
type Download struct {
log *logger.Logger
log *slog.Logger
dialer NetDialer
transfer fileTransferer
serviceManager serviceManager
@ -33,7 +32,7 @@ type Download struct {
}
// New creates a new Download.
func New(log *logger.Logger, dialer NetDialer, serviceManager serviceManager,
func New(log *slog.Logger, dialer NetDialer, serviceManager serviceManager,
transfer fileTransferer, info infoSetter,
) *Download {
return &Download{
@ -51,7 +50,7 @@ func (d *Download) DownloadInfo(ctx context.Context, ip string) error {
return nil
}
log := d.log.With(zap.String("ip", ip))
log := d.log.With(slog.String("ip", ip))
serverAddr := net.JoinHostPort(ip, strconv.Itoa(constants.DebugdPort))
client, closer, err := d.newClient(ctx, serverAddr, log)
@ -60,19 +59,19 @@ func (d *Download) DownloadInfo(ctx context.Context, ip string) error {
}
defer closer.Close()
log.Infof("Trying to download info")
log.Info("Trying to download info")
resp, err := client.GetInfo(ctx, &pb.GetInfoRequest{})
if err != nil {
return fmt.Errorf("getting info from other instance: %w", err)
}
log.Infof("Successfully downloaded info")
log.Info("Successfully downloaded info")
return d.info.SetProto(resp.Info)
}
// DownloadDeployment will open a new grpc connection to another instance, attempting to download files from that instance.
func (d *Download) DownloadDeployment(ctx context.Context, ip string) error {
log := d.log.With(zap.String("ip", ip))
log := d.log.With(slog.String("ip", ip))
serverAddr := net.JoinHostPort(ip, strconv.Itoa(constants.DebugdPort))
client, closer, err := d.newClient(ctx, serverAddr, log)
@ -81,7 +80,7 @@ func (d *Download) DownloadDeployment(ctx context.Context, ip string) error {
}
defer closer.Close()
log.Infof("Trying to download files")
log.Info("Trying to download files")
stream, err := client.DownloadFiles(ctx, &pb.DownloadFilesRequest{})
if err != nil {
return fmt.Errorf("starting file download from other instance: %w", err)
@ -90,15 +89,15 @@ func (d *Download) DownloadDeployment(ctx context.Context, ip string) error {
err = d.transfer.RecvFiles(stream)
switch {
case err == nil:
d.log.Infof("Downloading files succeeded")
d.log.Info("Downloading files succeeded")
case errors.Is(err, filetransfer.ErrReceiveRunning):
d.log.Warnf("Download already in progress")
d.log.Warn("Download already in progress")
return err
case errors.Is(err, filetransfer.ErrReceiveFinished):
d.log.Warnf("Download already finished")
d.log.Warn("Download already finished")
return nil
default:
d.log.With(zap.Error(err)).Errorf("Downloading files failed")
d.log.With(slog.Any("error", err)).Error("Downloading files failed")
return err
}
@ -111,15 +110,15 @@ func (d *Download) DownloadDeployment(ctx context.Context, ip string) error {
ctx, file.OverrideServiceUnit, file.TargetPath,
); err != nil {
// continue on error to allow other units to be overridden
d.log.With(zap.Error(err)).Errorf("Failed to override service unit %s", file.OverrideServiceUnit)
d.log.With(slog.Any("error", err)).Error("Failed to override service unit %s", file.OverrideServiceUnit)
}
}
return nil
}
func (d *Download) newClient(ctx context.Context, serverAddr string, log *logger.Logger) (pb.DebugdClient, io.Closer, error) {
log.Infof("Connecting to server")
func (d *Download) newClient(ctx context.Context, serverAddr string, log *slog.Logger) (pb.DebugdClient, io.Closer, error) {
log.Info("Connecting to server")
conn, err := d.dial(ctx, serverAddr)
if err != nil {
return nil, nil, fmt.Errorf("connecting to other instance via gRPC: %w", err)

View file

@ -9,6 +9,7 @@ package deploy
import (
"context"
"errors"
"log/slog"
"net"
"strconv"
"testing"
@ -117,7 +118,7 @@ func TestDownloadDeployment(t *testing.T) {
defer grpcServ.GracefulStop()
download := &Download{
log: logger.NewTest(t),
log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)),
dialer: dialer,
transfer: transfer,
serviceManager: serviceMgr,
@ -189,7 +190,7 @@ func TestDownloadInfo(t *testing.T) {
defer grpcServer.GracefulStop()
download := &Download{
log: logger.NewTest(t),
log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)),
dialer: dialer,
info: &tc.infoSetter,
}

View file

@ -9,15 +9,14 @@ package deploy
import (
"context"
"fmt"
"log/slog"
"os"
"path/filepath"
"regexp"
"strings"
"sync"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/spf13/afero"
"go.uber.org/zap"
)
const (
@ -60,14 +59,14 @@ type SystemdUnit struct {
// ServiceManager receives ServiceManagerRequests and units via channels and performs the requests / creates the unit files.
type ServiceManager struct {
log *logger.Logger
log *slog.Logger
dbus dbusClient
fs afero.Fs
systemdUnitFilewriteLock sync.Mutex
}
// NewServiceManager creates a new ServiceManager.
func NewServiceManager(log *logger.Logger) *ServiceManager {
func NewServiceManager(log *slog.Logger) *ServiceManager {
fs := afero.NewOsFs()
return &ServiceManager{
log: log,
@ -102,7 +101,7 @@ type dbusConn interface {
// SystemdAction will perform a systemd action on a service unit (start, stop, restart, reload).
func (s *ServiceManager) SystemdAction(ctx context.Context, request ServiceManagerRequest) error {
log := s.log.With(zap.String("unit", request.Unit), zap.String("action", request.Action.String()))
log := s.log.With(slog.String("unit", request.Unit), slog.String("action", request.Action.String()))
conn, err := s.dbus.NewSystemConnectionContext(ctx)
if err != nil {
return fmt.Errorf("establishing systemd connection: %w", err)
@ -127,7 +126,7 @@ func (s *ServiceManager) SystemdAction(ctx context.Context, request ServiceManag
}
if request.Action == Reload {
log.Infof("daemon-reload succeeded")
log.Info("daemon-reload succeeded")
return nil
}
// Wait for the action to finish and then check if it was
@ -136,7 +135,7 @@ func (s *ServiceManager) SystemdAction(ctx context.Context, request ServiceManag
switch result {
case "done":
log.Infof("%s on systemd unit %s succeeded", request.Action, request.Unit)
log.Info("%s on systemd unit %s succeeded", request.Action, request.Unit)
return nil
default:
@ -146,8 +145,8 @@ func (s *ServiceManager) SystemdAction(ctx context.Context, request ServiceManag
// WriteSystemdUnitFile will write a systemd unit to disk.
func (s *ServiceManager) WriteSystemdUnitFile(ctx context.Context, unit SystemdUnit) error {
log := s.log.With(zap.String("unitFile", fmt.Sprintf("%s/%s", systemdUnitFolder, unit.Name)))
log.Infof("Writing systemd unit file")
log := s.log.With(slog.String("unitFile", fmt.Sprintf("%s/%s", systemdUnitFolder, unit.Name)))
log.Info("Writing systemd unit file")
s.systemdUnitFilewriteLock.Lock()
defer s.systemdUnitFilewriteLock.Unlock()
if err := afero.WriteFile(s.fs, fmt.Sprintf("%s/%s", systemdUnitFolder, unit.Name), []byte(unit.Contents), 0o644); err != nil {
@ -158,14 +157,14 @@ func (s *ServiceManager) WriteSystemdUnitFile(ctx context.Context, unit SystemdU
return fmt.Errorf("performing systemd daemon-reload: %w", err)
}
log.Infof("Wrote systemd unit file and performed daemon-reload")
log.Info("Wrote systemd unit file and performed daemon-reload")
return nil
}
// OverrideServiceUnitExecStart will override the ExecStart of a systemd unit.
func (s *ServiceManager) OverrideServiceUnitExecStart(ctx context.Context, unitName, execStart string) error {
log := s.log.With(zap.String("unitFile", fmt.Sprintf("%s/%s", systemdUnitFolder, unitName)))
log.Infof("Overriding systemd unit file execStart")
log := s.log.With(slog.String("unitFile", fmt.Sprintf("%s/%s", systemdUnitFolder, unitName)))
log.Info("Overriding systemd unit file execStart")
if !systemdUnitNameRegexp.MatchString(unitName) {
return fmt.Errorf("unit name %q is invalid", unitName)
}
@ -187,13 +186,13 @@ func (s *ServiceManager) OverrideServiceUnitExecStart(ctx context.Context, unitN
// do not return early here
// the "daemon-reload" command may return an unrelated error
// and there is no way to know if the override was successful
log.Warnf("Failed to perform systemd daemon-reload: %v", err)
log.Warn("Failed to perform systemd daemon-reload: %v", err)
}
if err := s.SystemdAction(ctx, ServiceManagerRequest{Unit: unitName + ".service", Action: Restart}); err != nil {
log.Warnf("Failed to perform unit restart: %v", err)
log.Warn("Failed to perform unit restart: %v", err)
return fmt.Errorf("performing systemd unit restart: %w", err)
}
log.Infof("Overrode systemd unit file execStart, performed daemon-reload and restarted unit %v", unitName)
log.Info("Overrode systemd unit file execStart, performed daemon-reload and restarted unit %v", unitName)
return nil
}
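
The With call at the top of SystemdAction binds the unit and action attributes once, so every record logged through that derived logger carries them. A minimal sketch of the effect, with made-up values:

    package main

    import (
        "log/slog"
        "os"
    )

    func main() {
        base := slog.New(slog.NewTextHandler(os.Stdout, nil))

        // Attributes bound with With appear on every subsequent record.
        log := base.With(slog.String("unit", "example.service"), slog.String("action", "restart"))

        log.Info("restart on systemd unit succeeded")
        // roughly: time=... level=INFO msg="restart on systemd unit succeeded"
        //          unit=example.service action=restart
    }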

View file

@ -10,6 +10,7 @@ import (
"context"
"errors"
"fmt"
"log/slog"
"sync"
"testing"
@ -102,7 +103,7 @@ func TestSystemdAction(t *testing.T) {
fs := afero.NewMemMapFs()
manager := ServiceManager{
log: logger.NewTest(t),
log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)),
dbus: &tc.dbus,
fs: fs,
systemdUnitFilewriteLock: sync.Mutex{},
@ -181,7 +182,7 @@ func TestWriteSystemdUnitFile(t *testing.T) {
fs = afero.NewReadOnlyFs(fs)
}
manager := ServiceManager{
log: logger.NewTest(t),
log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)),
dbus: &tc.dbus,
fs: fs,
systemdUnitFilewriteLock: sync.Mutex{},
@ -294,7 +295,7 @@ func TestOverrideServiceUnitExecStart(t *testing.T) {
fs = afero.NewReadOnlyFs(fs)
}
manager := ServiceManager{
log: logger.NewTest(t),
log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)),
dbus: &tc.dbus,
fs: fs,
systemdUnitFilewriteLock: sync.Mutex{},

View file

@ -12,6 +12,7 @@ import (
"context"
"fmt"
"io"
"log/slog"
"os"
"os/exec"
"path/filepath"
@ -22,7 +23,6 @@ import (
"github.com/edgelesssys/constellation/v2/debugd/internal/debugd/info"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
"github.com/edgelesssys/constellation/v2/internal/cloud/metadata"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/edgelesssys/constellation/v2/internal/versions"
)
@ -36,60 +36,60 @@ const (
//
// This requires podman to be installed.
func NewStartTrigger(ctx context.Context, wg *sync.WaitGroup, provider cloudprovider.Provider,
metadata providerMetadata, logger *logger.Logger,
metadata providerMetadata, logger *slog.Logger,
) func(*info.Map) {
return func(infoMap *info.Map) {
wg.Add(1)
go func() {
defer wg.Done()
logger.Infof("Start trigger running")
logger.Info("Start trigger running")
if err := ctx.Err(); err != nil {
logger.With("err", err).Errorf("Start trigger canceled")
logger.With("err", err).Error("Start trigger canceled")
return
}
logger.Infof("Get flags from infos")
logger.Info("Get flags from infos")
_, ok, err := infoMap.Get("logcollect")
if err != nil {
logger.Errorf("Getting infos: %v", err)
logger.Error("Getting infos: %v", err)
return
}
if !ok {
logger.Infof("Flag 'logcollect' not set")
logger.Info("Flag 'logcollect' not set")
return
}
cerdsGetter, err := newCloudCredentialGetter(ctx, provider, infoMap)
if err != nil {
logger.Errorf("Creating cloud credential getter: %v", err)
logger.Error("Creating cloud credential getter: %v", err)
return
}
logger.Infof("Getting credentials")
logger.Info("Getting credentials")
creds, err := cerdsGetter.GetOpensearchCredentials(ctx)
if err != nil {
logger.Errorf("Getting opensearch credentials: %v", err)
logger.Error("Getting opensearch credentials: %v", err)
return
}
logger.Infof("Getting logstash pipeline template from image %s", versions.LogstashImage)
logger.Info("Getting logstash pipeline template from image %s", versions.LogstashImage)
tmpl, err := getTemplate(ctx, logger, versions.LogstashImage, "/run/logstash/templates/pipeline.conf", "/run/logstash")
if err != nil {
logger.Errorf("Getting logstash pipeline template: %v", err)
logger.Error("Getting logstash pipeline template: %v", err)
return
}
infoMapM, err := infoMap.GetCopy()
if err != nil {
logger.Errorf("Getting copy of map from info: %v", err)
logger.Error("Getting copy of map from info: %v", err)
return
}
infoMapM = filterInfoMap(infoMapM)
setCloudMetadata(ctx, infoMapM, provider, metadata)
logger.Infof("Writing logstash pipeline")
logger.Info("Writing logstash pipeline")
pipelineConf := logstashConfInput{
Port: 5044,
Host: openSearchHost,
@ -97,14 +97,14 @@ func NewStartTrigger(ctx context.Context, wg *sync.WaitGroup, provider cloudprov
Credentials: creds,
}
if err := writeTemplate("/run/logstash/pipeline/pipeline.conf", tmpl, pipelineConf); err != nil {
logger.Errorf("Writing logstash config: %v", err)
logger.Error("Writing logstash config: %v", err)
return
}
logger.Infof("Getting filebeat config template from image %s", versions.FilebeatImage)
logger.Info("Getting filebeat config template from image %s", versions.FilebeatImage)
tmpl, err = getTemplate(ctx, logger, versions.FilebeatImage, "/run/filebeat/templates/filebeat.yml", "/run/filebeat")
if err != nil {
logger.Errorf("Getting filebeat config template: %v", err)
logger.Error("Getting filebeat config template: %v", err)
return
}
filebeatConf := filebeatConfInput{
@ -112,26 +112,26 @@ func NewStartTrigger(ctx context.Context, wg *sync.WaitGroup, provider cloudprov
AddCloudMetadata: true,
}
if err := writeTemplate("/run/filebeat/filebeat.yml", tmpl, filebeatConf); err != nil {
logger.Errorf("Writing filebeat pipeline: %v", err)
logger.Error("Writing filebeat pipeline: %v", err)
return
}
logger.Infof("Starting log collection pod")
logger.Info("Starting log collection pod")
if err := startPod(ctx, logger); err != nil {
logger.Errorf("Starting log collection: %v", err)
logger.Error("Starting log collection: %v", err)
}
}()
}
}
func getTemplate(ctx context.Context, logger *logger.Logger, image, templateDir, destDir string) (*template.Template, error) {
func getTemplate(ctx context.Context, logger *slog.Logger, image, templateDir, destDir string) (*template.Template, error) {
createContainerArgs := []string{
"create",
"--name=template",
image,
}
createContainerCmd := exec.CommandContext(ctx, "podman", createContainerArgs...)
logger.Infof("Creating template container")
logger.Info("Creating template container")
if out, err := createContainerCmd.CombinedOutput(); err != nil {
return nil, fmt.Errorf("creating template container: %w\n%s", err, out)
}
@ -146,7 +146,7 @@ func getTemplate(ctx context.Context, logger *logger.Logger, image, templateDir,
destDir,
}
copyFromCmd := exec.CommandContext(ctx, "podman", copyFromArgs...)
logger.Infof("Copying templates")
logger.Info("Copying templates")
if out, err := copyFromCmd.CombinedOutput(); err != nil {
return nil, fmt.Errorf("copying templates: %w\n%s", err, out)
}
@ -156,7 +156,7 @@ func getTemplate(ctx context.Context, logger *logger.Logger, image, templateDir,
"template",
}
removeContainerCmd := exec.CommandContext(ctx, "podman", removeContainerArgs...)
logger.Infof("Removing template container")
logger.Info("Removing template container")
if out, err := removeContainerCmd.CombinedOutput(); err != nil {
return nil, fmt.Errorf("removing template container: %w\n%s", err, out)
}
@ -169,7 +169,7 @@ func getTemplate(ctx context.Context, logger *logger.Logger, image, templateDir,
return tmpl, nil
}
func startPod(ctx context.Context, logger *logger.Logger) error {
func startPod(ctx context.Context, logger *slog.Logger) error {
// create a shared pod for filebeat, metricbeat and logstash
createPodArgs := []string{
"pod",
@ -177,13 +177,13 @@ func startPod(ctx context.Context, logger *logger.Logger) error {
"logcollection",
}
createPodCmd := exec.CommandContext(ctx, "podman", createPodArgs...)
logger.Infof("Create pod command: %v", createPodCmd.String())
logger.Info("Create pod command: %v", createPodCmd.String())
if out, err := createPodCmd.CombinedOutput(); err != nil {
return fmt.Errorf("failed to create pod: %w; output: %s", err, out)
}
// start logstash container
logstashLog := newCmdLogger(logger.Named("logstash"))
logstashLog := newCmdLogger(logger.WithGroup("logstash"))
runLogstashArgs := []string{
"run",
"--rm",
@ -194,7 +194,7 @@ func startPod(ctx context.Context, logger *logger.Logger) error {
versions.LogstashImage,
}
runLogstashCmd := exec.CommandContext(ctx, "podman", runLogstashArgs...)
logger.Infof("Run logstash command: %v", runLogstashCmd.String())
logger.Info("Run logstash command: %v", runLogstashCmd.String())
runLogstashCmd.Stdout = logstashLog
runLogstashCmd.Stderr = logstashLog
if err := runLogstashCmd.Start(); err != nil {
@ -202,7 +202,7 @@ func startPod(ctx context.Context, logger *logger.Logger) error {
}
// start filebeat container
filebeatLog := newCmdLogger(logger.Named("filebeat"))
filebeatLog := newCmdLogger(logger.WithGroup("filebeat"))
runFilebeatArgs := []string{
"run",
"--rm",
@ -219,7 +219,7 @@ func startPod(ctx context.Context, logger *logger.Logger) error {
versions.FilebeatImage,
}
runFilebeatCmd := exec.CommandContext(ctx, "podman", runFilebeatArgs...)
logger.Infof("Run filebeat command: %v", runFilebeatCmd.String())
logger.Info("Run filebeat command: %v", runFilebeatCmd.String())
runFilebeatCmd.Stdout = filebeatLog
runFilebeatCmd.Stderr = filebeatLog
if err := runFilebeatCmd.Start(); err != nil {
@ -295,16 +295,16 @@ func setCloudMetadata(ctx context.Context, m map[string]string, provider cloudpr
}
}
func newCmdLogger(logger *logger.Logger) io.Writer {
func newCmdLogger(logger *slog.Logger) io.Writer {
return &cmdLogger{logger: logger}
}
type cmdLogger struct {
logger *logger.Logger
logger *slog.Logger
}
func (c *cmdLogger) Write(p []byte) (n int, err error) {
c.logger.Infof("%s", p)
c.logger.Info("%s", p)
return len(p), nil
}
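
cmdLogger above is an io.Writer adapter so podman's stdout and stderr stream into the structured log. A self-contained sketch of the same idea, using a placeholder command. It also shows what WithGroup, which replaces Named throughout this commit, actually does: it nests the keys of later attributes under the group name rather than merely labelling the logger.

    package main

    import (
        "log/slog"
        "os"
        "os/exec"
    )

    // slogWriter adapts *slog.Logger to io.Writer so command output can be
    // streamed into structured logs, as cmdLogger does above.
    type slogWriter struct{ log *slog.Logger }

    func (w slogWriter) Write(p []byte) (int, error) {
        w.log.Info(string(p))
        return len(p), nil
    }

    func main() {
        log := slog.New(slog.NewTextHandler(os.Stderr, nil))

        // WithGroup("logstash") nests later attribute keys as logstash.<key>;
        // records without attributes are unaffected.
        out := slogWriter{log: log.WithGroup("logstash")}

        cmd := exec.Command("echo", "example container output") // placeholder command
        cmd.Stdout = out
        cmd.Stderr = out
        _ = cmd.Run()
    }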

View file

@ -8,12 +8,11 @@ package metadata
import (
"context"
"log/slog"
"sync"
"time"
"github.com/edgelesssys/constellation/v2/debugd/internal/debugd"
"github.com/edgelesssys/constellation/v2/internal/logger"
"go.uber.org/zap"
)
// Fetcher retrieves other debugd IPs from cloud provider metadata.
@ -24,7 +23,7 @@ type Fetcher interface {
// Scheduler schedules fetching of metadata using timers.
type Scheduler struct {
log *logger.Logger
log *slog.Logger
fetcher Fetcher
downloader downloader
deploymentDone bool
@ -33,7 +32,7 @@ type Scheduler struct {
}
// NewScheduler returns a new scheduler.
func NewScheduler(log *logger.Logger, fetcher Fetcher, downloader downloader) *Scheduler {
func NewScheduler(log *slog.Logger, fetcher Fetcher, downloader downloader) *Scheduler {
return &Scheduler{
log: log,
fetcher: fetcher,
@ -60,22 +59,22 @@ func (s *Scheduler) Start(ctx context.Context, wg *sync.WaitGroup) {
ips, err := s.fetcher.DiscoverDebugdIPs(ctx)
if err != nil {
s.log.With(zap.Error(err)).Warnf("Discovering debugd IPs failed")
s.log.With(slog.Any("error", err)).Warn("Discovering debugd IPs failed")
}
lbip, err := s.fetcher.DiscoverLoadBalancerIP(ctx)
if err != nil {
s.log.With(zap.Error(err)).Warnf("Discovering load balancer IP failed")
s.log.With(slog.Any("error", err)).Warn("Discovering load balancer IP failed")
} else {
ips = append(ips, lbip)
}
if len(ips) == 0 {
s.log.With(zap.Error(err)).Warnf("No debugd IPs discovered")
s.log.With(slog.Any("error", err)).Warn("No debugd IPs discovered")
continue
}
s.log.With(zap.Strings("ips", ips)).Infof("Discovered instances")
s.log.With(slog.Any("ips", ips)).Info("Discovered instances")
s.download(ctx, ips)
if s.deploymentDone && s.infoDone {
return
@ -90,8 +89,8 @@ func (s *Scheduler) download(ctx context.Context, ips []string) {
for _, ip := range ips {
if !s.deploymentDone {
if err := s.downloader.DownloadDeployment(ctx, ip); err != nil {
s.log.With(zap.Error(err), zap.String("peer", ip)).
Warnf("Downloading deployment from %s: %s", ip, err)
s.log.With(slog.Any("error", err), slog.String("peer", ip)).
Warn("Downloading deployment from %s: %s", ip, err)
} else {
s.deploymentDone = true
}
@ -99,8 +98,8 @@ func (s *Scheduler) download(ctx context.Context, ips []string) {
if !s.infoDone {
if err := s.downloader.DownloadInfo(ctx, ip); err != nil {
s.log.With(zap.Error(err), zap.String("peer", ip)).
Warnf("Downloading info from %s: %s", ip, err)
s.log.With(slog.Any("error", err), slog.String("peer", ip)).
Warn("Downloading info from %s: %s", ip, err)
} else {
s.infoDone = true
}

View file

@ -84,7 +84,7 @@ func TestSchedulerStart(t *testing.T) {
assert := assert.New(t)
scheduler := Scheduler{
log: logger.NewTest(t),
log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)),
fetcher: &tc.fetcher,
downloader: &tc.downloader,
interval: 20 * time.Millisecond,

View file

@ -10,7 +10,9 @@ package server
import (
"context"
"errors"
"log/slog"
"net"
"os"
"strconv"
"sync"
"time"
@ -27,7 +29,7 @@ import (
)
type debugdServer struct {
log *logger.Logger
log *slog.Logger
serviceManager serviceManager
transfer fileTransferer
info *info.Map
@ -36,7 +38,7 @@ type debugdServer struct {
}
// New creates a new debugdServer according to the gRPC spec.
func New(log *logger.Logger, serviceManager serviceManager, transfer fileTransferer, infos *info.Map) pb.DebugdServer {
func New(log *slog.Logger, serviceManager serviceManager, transfer fileTransferer, infos *info.Map) pb.DebugdServer {
return &debugdServer{
log: log,
serviceManager: serviceManager,
@ -47,25 +49,25 @@ func New(log *logger.Logger, serviceManager serviceManager, transfer fileTransfe
// SetInfo sets the info of the debugd instance.
func (s *debugdServer) SetInfo(_ context.Context, req *pb.SetInfoRequest) (*pb.SetInfoResponse, error) {
s.log.Infof("Received SetInfo request")
s.log.Info("Received SetInfo request")
if len(req.Info) == 0 {
s.log.Infof("Info is empty")
s.log.Info("Info is empty")
}
setProtoErr := s.info.SetProto(req.Info)
if errors.Is(setProtoErr, info.ErrInfoAlreadySet) {
s.log.Warnf("Setting info failed (already set)")
s.log.Warn("Setting info failed (already set)")
return &pb.SetInfoResponse{
Status: pb.SetInfoStatus_SET_INFO_ALREADY_SET,
}, nil
}
if setProtoErr != nil {
s.log.With(zap.Error(setProtoErr)).Errorf("Setting info failed")
s.log.With(slog.Any("error", setProtoErr)).Error("Setting info failed")
return nil, setProtoErr
}
s.log.Infof("Info set")
s.log.Info("Info set")
return &pb.SetInfoResponse{
Status: pb.SetInfoStatus_SET_INFO_SUCCESS,
@ -74,7 +76,7 @@ func (s *debugdServer) SetInfo(_ context.Context, req *pb.SetInfoRequest) (*pb.S
// GetInfo returns the info of the debugd instance.
func (s *debugdServer) GetInfo(_ context.Context, _ *pb.GetInfoRequest) (*pb.GetInfoResponse, error) {
s.log.Infof("Received GetInfo request")
s.log.Info("Received GetInfo request")
info, err := s.info.GetProto()
if err != nil {
@ -86,23 +88,23 @@ func (s *debugdServer) GetInfo(_ context.Context, _ *pb.GetInfoRequest) (*pb.Get
// UploadFiles receives a stream of files (each consisting of a header and a stream of chunks) and writes them to the filesystem.
func (s *debugdServer) UploadFiles(stream pb.Debugd_UploadFilesServer) error {
s.log.Infof("Received UploadFiles request")
s.log.Info("Received UploadFiles request")
err := s.transfer.RecvFiles(stream)
switch {
case err == nil:
s.log.Infof("Uploading files succeeded")
s.log.Info("Uploading files succeeded")
case errors.Is(err, filetransfer.ErrReceiveRunning):
s.log.Warnf("Upload already in progress")
s.log.Warn("Upload already in progress")
return stream.SendAndClose(&pb.UploadFilesResponse{
Status: pb.UploadFilesStatus_UPLOAD_FILES_ALREADY_STARTED,
})
case errors.Is(err, filetransfer.ErrReceiveFinished):
s.log.Warnf("Upload already finished")
s.log.Warn("Upload already finished")
return stream.SendAndClose(&pb.UploadFilesResponse{
Status: pb.UploadFilesStatus_UPLOAD_FILES_ALREADY_FINISHED,
})
default:
s.log.With(zap.Error(err)).Errorf("Uploading files failed")
s.log.With(slog.Any("error", err)).Error("Uploading files failed")
return stream.SendAndClose(&pb.UploadFilesResponse{
Status: pb.UploadFilesStatus_UPLOAD_FILES_UPLOAD_FAILED,
})
@ -120,7 +122,7 @@ func (s *debugdServer) UploadFiles(stream pb.Debugd_UploadFilesServer) error {
}
if overrideUnitErr != nil {
s.log.With(zap.Error(overrideUnitErr)).Errorf("Overriding service units failed")
s.log.With(slog.Any("error", overrideUnitErr)).Error("Overriding service units failed")
return stream.SendAndClose(&pb.UploadFilesResponse{
Status: pb.UploadFilesStatus_UPLOAD_FILES_START_FAILED,
})
@ -132,13 +134,13 @@ func (s *debugdServer) UploadFiles(stream pb.Debugd_UploadFilesServer) error {
// DownloadFiles streams the previously received files to other instances.
func (s *debugdServer) DownloadFiles(_ *pb.DownloadFilesRequest, stream pb.Debugd_DownloadFilesServer) error {
s.log.Infof("Sending files to other instance")
s.log.Info("Sending files to other instance")
return s.transfer.SendFiles(stream)
}
// UploadSystemServiceUnits receives systemd service units, writes them to a service file and schedules a daemon-reload.
func (s *debugdServer) UploadSystemServiceUnits(ctx context.Context, in *pb.UploadSystemdServiceUnitsRequest) (*pb.UploadSystemdServiceUnitsResponse, error) {
s.log.Infof("Uploading systemd service units")
s.log.Info("Uploading systemd service units")
for _, unit := range in.Units {
if err := s.serviceManager.WriteSystemdUnitFile(ctx, deploy.SystemdUnit{Name: unit.Name, Contents: unit.Contents}); err != nil {
return &pb.UploadSystemdServiceUnitsResponse{Status: pb.UploadSystemdServiceUnitsStatus_UPLOAD_SYSTEMD_SERVICE_UNITS_FAILURE}, nil
@ -149,25 +151,27 @@ func (s *debugdServer) UploadSystemServiceUnits(ctx context.Context, in *pb.Uplo
}
// Start will start the gRPC server as goroutine.
func Start(log *logger.Logger, wg *sync.WaitGroup, serv pb.DebugdServer) {
func Start(log *slog.Logger, wg *sync.WaitGroup, serv pb.DebugdServer) {
wg.Add(1)
go func() {
defer wg.Done()
grpcLog := log.Named("gRPC")
grpcLog := log.WithGroup("gRPC")
// TODO(miampf): Find a way to dynamically increase the log level
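// One possible direction (illustrative sketch only, not part of this change; the
// type and field names are made up): wrap the existing handler in a type that
// enforces a minimum level, so the gRPC logger stays at Warn while the root
// logger keeps the configured verbosity. A complete solution would also wrap
// WithAttrs/WithGroup so the minimum level survives derived handlers.
//
//	type minLevelHandler struct {
//		slog.Handler
//		min slog.Level
//	}
//
//	func (h minLevelHandler) Enabled(ctx context.Context, level slog.Level) bool {
//		return level >= h.min && h.Handler.Enabled(ctx, level)
//	}
//
//	grpcLog := slog.New(minLevelHandler{Handler: log.Handler(), min: slog.LevelWarn}).WithGroup("gRPC")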
grpcLog.WithIncreasedLevel(zap.WarnLevel).ReplaceGRPCLogger()
grpcServer := grpc.NewServer(
grpcLog.GetServerStreamInterceptor(),
grpcLog.GetServerUnaryInterceptor(),
logger.GetServerStreamInterceptor(grpcLog),
logger.GetServerUnaryInterceptor(grpcLog),
grpc.KeepaliveParams(keepalive.ServerParameters{Time: 15 * time.Second}),
)
pb.RegisterDebugdServer(grpcServer, serv)
lis, err := net.Listen("tcp", net.JoinHostPort("0.0.0.0", strconv.Itoa(constants.DebugdPort)))
if err != nil {
log.With(zap.Error(err)).Fatalf("Listening failed")
log.With(slog.Any("error", err)).Error("Listening failed")
os.Exit(1)
}
log.Infof("gRPC server is waiting for connections")
log.Info("gRPC server is waiting for connections")
grpcServer.Serve(lis)
}()
}

View file

@ -65,7 +65,7 @@ func TestSetInfo(t *testing.T) {
require := require.New(t)
serv := debugdServer{
log: logger.NewTest(t),
log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)),
info: tc.info,
}
@ -128,7 +128,7 @@ func TestGetInfo(t *testing.T) {
}
serv := debugdServer{
log: logger.NewTest(t),
log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)),
info: tc.info,
}
@ -192,7 +192,7 @@ func TestUploadFiles(t *testing.T) {
transfer := &stubTransfer{files: tc.files, recvFilesErr: tc.recvFilesErr}
serv := debugdServer{
log: logger.NewTest(t),
log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)),
serviceManager: serviceMgr,
transfer: transfer,
}
@ -237,7 +237,7 @@ func TestDownloadFiles(t *testing.T) {
transfer := &stubTransfer{canSend: tc.canSend}
serv := debugdServer{
log: logger.NewTest(t),
log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)),
transfer: transfer,
}
@ -317,7 +317,7 @@ func TestUploadSystemServiceUnits(t *testing.T) {
require := require.New(t)
serv := debugdServer{
log: logger.NewTest(t),
log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)),
serviceManager: &tc.serviceManager,
}
grpcServ, conn, err := setupServerWithConn(endpoint, &serv)

View file

@ -12,13 +12,13 @@ import (
"errors"
"io"
"io/fs"
"log/slog"
"sync"
"sync/atomic"
"github.com/edgelesssys/constellation/v2/debugd/internal/debugd"
"github.com/edgelesssys/constellation/v2/debugd/internal/filetransfer/streamer"
pb "github.com/edgelesssys/constellation/v2/debugd/service"
"github.com/edgelesssys/constellation/v2/internal/logger"
"go.uber.org/zap"
)
@ -35,7 +35,7 @@ type SendFilesStream interface {
// FileTransferer manages sending and receiving of files.
type FileTransferer struct {
fileMux sync.RWMutex
log *logger.Logger
log *slog.Logger
receiveStarted bool
receiveFinished atomic.Bool
files []FileStat
@ -44,7 +44,7 @@ type FileTransferer struct {
}
// New creates a new FileTransferer.
func New(log *logger.Logger, streamer streamReadWriter, showProgress bool) *FileTransferer {
func New(log *slog.Logger, streamer streamReadWriter, showProgress bool) *FileTransferer {
return &FileTransferer{
log: log,
streamer: streamer,
@ -146,7 +146,7 @@ func (s *FileTransferer) handleFileRecv(stream RecvFilesStream) (bool, error) {
if header == nil {
return false, errors.New("first message must be a header message")
}
s.log.Infof("Starting file receive of %q", header.TargetPath)
s.log.Info("Starting file receive of %q", header.TargetPath)
s.addFile(FileStat{
SourcePath: header.TargetPath,
TargetPath: header.TargetPath,
@ -160,10 +160,10 @@ func (s *FileTransferer) handleFileRecv(stream RecvFilesStream) (bool, error) {
})
recvChunkStream := &recvChunkStream{stream: stream}
if err := s.streamer.WriteStream(header.TargetPath, recvChunkStream, s.showProgress); err != nil {
s.log.With(zap.Error(err)).Errorf("Receive of file %q failed", header.TargetPath)
s.log.With(slog.Any("error", err)).Error("Receive of file %q failed", header.TargetPath)
return false, err
}
s.log.Infof("Finished file receive of %q", header.TargetPath)
s.log.Info("Finished file receive of %q", header.TargetPath)
return false, nil
}

View file

@ -117,7 +117,7 @@ func TestSendFiles(t *testing.T) {
streamer := &stubStreamReadWriter{readStreamErr: tc.readStreamErr}
stream := &stubSendFilesStream{sendErr: tc.sendErr}
transfer := &FileTransferer{
log: logger.NewTest(t),
log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)),
streamer: streamer,
showProgress: false,
}
@ -254,7 +254,7 @@ func TestRecvFiles(t *testing.T) {
streamer := &stubStreamReadWriter{writeStreamErr: tc.writeStreamErr}
stream := &fakeRecvFilesStream{msgs: tc.msgs, recvErr: tc.recvErr}
transfer := New(logger.NewTest(t), streamer, false)
transfer := New(slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), streamer, false)
if tc.recvAlreadyStarted {
transfer.receiveStarted = true
}
@ -307,7 +307,7 @@ func TestGetSetFiles(t *testing.T) {
assert := assert.New(t)
streamer := &dummyStreamReadWriter{}
transfer := New(logger.NewTest(t), streamer, false)
transfer := New(slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), streamer, false)
if tc.setFiles != nil {
transfer.SetFiles(*tc.setFiles)
}
@ -319,7 +319,7 @@ func TestGetSetFiles(t *testing.T) {
}
func TestConcurrency(t *testing.T) {
ft := New(logger.NewTest(t), &stubStreamReadWriter{}, false)
ft := New(slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), &stubStreamReadWriter{}, false)
sendFiles := func() {
_ = ft.SendFiles(&stubSendFilesStream{})

View file

@ -10,6 +10,7 @@ import (
"context"
"flag"
"io"
"log/slog"
"net"
"os"
"path/filepath"
@ -50,18 +51,20 @@ func main() {
verbosity := flag.Int("v", 0, logger.CmdLineVerbosityDescription)
flag.Parse()
log := logger.New(logger.JSONLog, logger.VerbosityFromInt(*verbosity))
log.With(zap.String("version", constants.BinaryVersion().String()), zap.String("cloudProvider", *csp)).
Infof("Starting disk-mapper")
log := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: logger.VerbosityFromInt(*verbosity)}))
log.With(slog.String("version", constants.BinaryVersion().String()), slog.String("cloudProvider", *csp)).
Info("Starting disk-mapper")
// set up quote issuer for aTLS connections
attestVariant, err := variant.FromString(os.Getenv(constants.AttestationVariant))
if err != nil {
log.With(zap.Error(err)).Fatalf("Failed to parse attestation variant")
log.With(slog.Any("error", err)).Error("Failed to parse attestation variant")
os.Exit(1)
}
issuer, err := choose.Issuer(attestVariant, log)
if err != nil {
log.With(zap.Error(err)).Fatalf("Failed to select issuer")
log.With(slog.Any("error", err)).Error("Failed to select issuer")
os.Exit(1)
}
// set up metadata API
@ -73,31 +76,37 @@ func main() {
// using udev rules, a symlink for our disk is created at /dev/sdb
diskPath, err = filepath.EvalSymlinks(awsStateDiskPath)
if err != nil {
log.With(zap.Error(err)).Fatalf("Unable to resolve Azure state disk path")
log.With(slog.Any("error", err)).Error("Unable to resolve Azure state disk path")
os.Exit(1)
}
metadataClient, err = awscloud.New(context.Background())
if err != nil {
log.With(zap.Error(err)).Fatalf("Failed to set up AWS metadata client")
log.With(slog.Any("error", err)).Error("Failed to set up AWS metadata client")
os.Exit(1)
}
case cloudprovider.Azure:
diskPath, err = filepath.EvalSymlinks(azureStateDiskPath)
if err != nil {
log.With(zap.Error(err)).Fatalf("Unable to resolve Azure state disk path")
log.With(slog.Any("error", err)).Error("Unable to resolve Azure state disk path")
os.Exit(1)
}
metadataClient, err = azurecloud.New(context.Background())
if err != nil {
log.With(zap.Error(err)).Fatalf("Failed to set up Azure metadata client")
log.With(slog.Any("error", err)).Error("Failed to set up Azure metadata client")
os.Exit(1)
}
case cloudprovider.GCP:
diskPath, err = filepath.EvalSymlinks(gcpStateDiskPath)
if err != nil {
log.With(zap.Error(err)).Fatalf("Unable to resolve GCP state disk path")
log.With(slog.Any("error", err)).Error("Unable to resolve GCP state disk path")
os.Exit(1)
}
gcpMeta, err := gcpcloud.New(context.Background())
if err != nil {
log.With(zap.Error(err)).Fatalf("Failed to create GCP metadata client")
log.With(slog.Any("error", err)).Error(("Failed to create GCP metadata client"))
os.Exit(1)
}
defer gcpMeta.Close()
metadataClient = gcpMeta
@ -106,7 +115,8 @@ func main() {
diskPath = openstackStateDiskPath
metadataClient, err = openstack.New(context.Background())
if err != nil {
log.With(zap.Error(err)).Fatalf("Failed to create OpenStack metadata client")
log.With(slog.Any("error", err)).Error(("Failed to create OpenStack metadata client"))
os.Exit(1)
}
case cloudprovider.QEMU:
@ -114,13 +124,15 @@ func main() {
metadataClient = qemucloud.New()
default:
log.Fatalf("CSP %s is not supported by Constellation", *csp)
log.Error("CSP %s is not supported by Constellation", *csp)
os.Exit(1)
}
// initialize device mapper
mapper, free, err := diskencryption.New(diskPath, log)
if err != nil {
log.With(zap.Error(err)).Fatalf("Failed to initialize device mapper")
log.With(slog.Any("error", err)).Error(("Failed to initialize device mapper"))
os.Exit(1)
}
defer free()
@ -132,7 +144,7 @@ func main() {
}
}
setupManger := setup.New(
log.Named("setupManager"),
log.WithGroup("setupManager"),
*csp,
diskPath,
afero.Afero{Fs: afero.NewOsFs()},
@ -142,7 +154,8 @@ func main() {
)
if err := setupManger.LogDevices(); err != nil {
log.With(zap.Error(err)).Fatalf("Failed to log devices")
log.With(slog.Any("error", err)).Error(("Failed to log devices"))
os.Exit(1)
}
// prepare the state disk
@ -151,21 +164,22 @@ func main() {
var self metadata.InstanceMetadata
self, err = metadataClient.Self(context.Background())
if err != nil {
log.With(zap.Error(err)).Fatalf("Failed to get self metadata")
log.With(slog.Any("error", err)).Error(("Failed to get self metadata"))
os.Exit(1)
}
rejoinClient := rejoinclient.New(
dialer.New(issuer, nil, &net.Dialer{}),
self,
metadataClient,
log.Named("rejoinClient"),
log.WithGroup("rejoinClient"),
)
// set up recovery server if control-plane node
var recoveryServer setup.RecoveryServer
if self.Role == role.ControlPlane {
recoveryServer = recoveryserver.New(issuer, kmssetup.KMS, log.Named("recoveryServer"))
recoveryServer = recoveryserver.New(issuer, kmssetup.KMS, log.WithGroup("recoveryServer"))
} else {
recoveryServer = recoveryserver.NewStub(log.Named("recoveryServer"))
recoveryServer = recoveryserver.NewStub(log.WithGroup("recoveryServer"))
}
err = setupManger.PrepareExistingDisk(setup.NewNodeRecoverer(recoveryServer, rejoinClient))
@ -173,6 +187,7 @@ func main() {
err = setupManger.PrepareNewDisk()
}
if err != nil {
log.With(zap.Error(err)).Fatalf("Failed to prepare state disk")
log.With(slog.Any("error", err)).Error(("Failed to prepare state disk"))
os.Exit(1)
}
}

View file

@ -15,22 +15,21 @@ package diskencryption
import (
"fmt"
"log/slog"
"time"
"github.com/edgelesssys/constellation/v2/internal/cryptsetup"
"github.com/edgelesssys/constellation/v2/internal/logger"
"go.uber.org/zap"
)
// DiskEncryption handles actions for formatting and mapping crypt devices.
type DiskEncryption struct {
device cryptDevice
devicePath string
log *logger.Logger
log *slog.Logger
}
// New creates a new crypt device for the device at path.
func New(path string, log *logger.Logger) (*DiskEncryption, func(), error) {
func New(path string, log *slog.Logger) (*DiskEncryption, func(), error) {
device := cryptsetup.New()
_, err := device.Init(path)
if err != nil {
@ -101,7 +100,7 @@ func (d *DiskEncryption) UnmapDisk(target string) error {
func (d *DiskEncryption) Wipe(blockWipeSize int) error {
logProgress := func(size, offset uint64) {
prog := (float64(offset) / float64(size)) * 100
d.log.With(zap.String("progress", fmt.Sprintf("%.2f%%", prog))).Infof("Wiping disk")
d.log.With(slog.String("progress", fmt.Sprintf("%.2f%%", prog))).Info("Wiping disk")
}
start := time.Now()
@ -109,7 +108,7 @@ func (d *DiskEncryption) Wipe(blockWipeSize int) error {
if err := d.device.Wipe("integrity", blockWipeSize, 0, logProgress, 30*time.Second); err != nil {
return fmt.Errorf("wiping disk: %w", err)
}
d.log.With(zap.Duration("duration", time.Since(start))).Infof("Wiping disk successful")
d.log.With(slog.Duration("duration", time.Since(start))).Info("Wiping disk successful")
return nil
}

View file

@ -17,6 +17,7 @@ package recoveryserver
import (
"context"
"log/slog"
"net"
"sync"
@ -44,13 +45,13 @@ type RecoveryServer struct {
grpcServer server
factory kmsFactory
log *logger.Logger
log *slog.Logger
recoverproto.UnimplementedAPIServer
}
// New returns a new RecoveryServer.
func New(issuer atls.Issuer, factory kmsFactory, log *logger.Logger) *RecoveryServer {
func New(issuer atls.Issuer, factory kmsFactory, log *slog.Logger) *RecoveryServer {
server := &RecoveryServer{
log: log,
factory: factory,
@ -58,7 +59,7 @@ func New(issuer atls.Issuer, factory kmsFactory, log *logger.Logger) *RecoverySe
grpcServer := grpc.NewServer(
grpc.Creds(atlscredentials.New(issuer, nil)),
log.Named("gRPC").GetServerStreamInterceptor(),
logger.GetServerStreamInterceptor(log.WithGroup("gRPC")),
)
recoverproto.RegisterAPIServer(grpcServer, server)
@ -71,7 +72,7 @@ func New(issuer atls.Issuer, factory kmsFactory, log *logger.Logger) *RecoverySe
// The server will shut down when the call is successful and the keys are returned.
// Additionally, the server can be shutdown by canceling the context.
func (s *RecoveryServer) Serve(ctx context.Context, listener net.Listener, diskUUID string) (diskKey, measurementSecret []byte, err error) {
s.log.Infof("Starting RecoveryServer")
s.log.Info("Starting RecoveryServer")
s.diskUUID = diskUUID
recoveryDone := make(chan struct{}, 1)
var serveErr error
@ -88,7 +89,7 @@ func (s *RecoveryServer) Serve(ctx context.Context, listener net.Listener, diskU
for {
select {
case <-ctx.Done():
s.log.Infof("Context canceled, shutting down server")
s.log.Info("Context canceled, shutting down server")
s.grpcServer.GracefulStop()
return nil, nil, ctx.Err()
case <-recoveryDone:
@ -106,7 +107,7 @@ func (s *RecoveryServer) Recover(ctx context.Context, req *recoverproto.RecoverM
defer s.mux.Unlock()
log := s.log.With(slog.String("peer", grpclog.PeerAddrFromContext(ctx)))
log.Infof("Received recover call")
log.Info("Received recover call")
cloudKms, err := s.factory(ctx, req.StorageUri, req.KmsUri)
if err != nil {
@ -123,7 +124,7 @@ func (s *RecoveryServer) Recover(ctx context.Context, req *recoverproto.RecoverM
}
s.stateDiskKey = stateDiskKey
s.measurementSecret = measurementSecret
log.Infof("Received state disk key and measurement secret, shutting down server")
log.Info("Received state disk key and measurement secret, shutting down server")
go s.grpcServer.GracefulStop()
return &recoverproto.RecoverResponse{}, nil
@ -131,18 +132,18 @@ func (s *RecoveryServer) Recover(ctx context.Context, req *recoverproto.RecoverM
// StubServer implements the RecoveryServer interface but does not actually start a server.
type StubServer struct {
log *logger.Logger
log *slog.Logger
}
// NewStub returns a new stubbed RecoveryServer.
// We use this to avoid having to start a server for worker nodes, since they don't require manual recovery.
func NewStub(log *logger.Logger) *StubServer {
func NewStub(log *slog.Logger) *StubServer {
return &StubServer{log: log}
}
// Serve waits until the context is canceled and returns nil.
func (s *StubServer) Serve(ctx context.Context, _ net.Listener, _ string) ([]byte, []byte, error) {
s.log.Infof("Running as worker node, skipping recovery server")
s.log.Info("Running as worker node, skipping recovery server")
<-ctx.Done()
return nil, nil, ctx.Err()
}

View file

@ -9,6 +9,7 @@ package recoveryserver
import (
"context"
"errors"
"log/slog"
"sync"
"testing"
"time"
@ -35,7 +36,7 @@ func TestMain(m *testing.M) {
func TestServe(t *testing.T) {
assert := assert.New(t)
log := logger.NewTest(t)
log := slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil))
uuid := "uuid"
server := New(atls.NewFakeIssuer(variant.Dummy{}), newStubKMS(nil, nil), log)
dialer := testdialer.NewBufconnDialer()
@ -106,7 +107,7 @@ func TestRecover(t *testing.T) {
ctx := context.Background()
serverUUID := "uuid"
server := New(atls.NewFakeIssuer(variant.Dummy{}), tc.factory, logger.NewTest(t))
server := New(atls.NewFakeIssuer(variant.Dummy{}), tc.factory, slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)))
netDialer := testdialer.NewBufconnDialer()
listener := netDialer.GetListener("192.0.2.1:1234")

View file

@ -15,16 +15,15 @@ import (
"context"
"errors"
"fmt"
"log/slog"
"net"
"strconv"
"time"
"github.com/edgelesssys/constellation/v2/internal/cloud/metadata"
"github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/edgelesssys/constellation/v2/internal/role"
"github.com/edgelesssys/constellation/v2/joinservice/joinproto"
"go.uber.org/zap"
"google.golang.org/grpc"
"k8s.io/utils/clock"
)
@ -47,12 +46,12 @@ type RejoinClient struct {
dialer grpcDialer
metadataAPI metadataAPI
log *logger.Logger
log *slog.Logger
}
// New returns a new RejoinClient.
func New(dial grpcDialer, nodeInfo metadata.InstanceMetadata,
meta metadataAPI, log *logger.Logger,
meta metadataAPI, log *slog.Logger,
) *RejoinClient {
return &RejoinClient{
nodeInfo: nodeInfo,
@ -70,22 +69,22 @@ func New(dial grpcDialer, nodeInfo metadata.InstanceMetadata,
// from the metadata API and send rejoin requests to them.
// The function returns after a successful rejoin request has been performed.
func (c *RejoinClient) Start(ctx context.Context, diskUUID string) (diskKey, measurementSecret []byte) {
c.log.Infof("Starting RejoinClient")
c.log.Info("Starting RejoinClient")
c.diskUUID = diskUUID
ticker := c.clock.NewTicker(c.interval)
defer ticker.Stop()
defer c.log.Infof("RejoinClient stopped")
defer c.log.Info("RejoinClient stopped")
for {
endpoints, err := c.getJoinEndpoints()
if err != nil {
c.log.With(zap.Error(err)).Errorf("Failed to get control-plane endpoints")
c.log.With(slog.Any("error", err)).Error("Failed to get control-plane endpoints")
} else {
c.log.With(zap.Strings("endpoints", endpoints)).Infof("Received list with JoinService endpoints")
c.log.With(slog.Any("endpoints", endpoints)).Info("Received list with JoinService endpoints")
diskKey, measurementSecret, err = c.tryRejoinWithAvailableServices(ctx, endpoints)
if err == nil {
c.log.Infof("Successfully retrieved rejoin ticket")
c.log.Info("Successfully retrieved rejoin ticket")
return diskKey, measurementSecret
}
}
@ -101,12 +100,12 @@ func (c *RejoinClient) Start(ctx context.Context, diskUUID string) (diskKey, mea
// tryRejoinWithAvailableServices tries sending rejoin requests to the available endpoints.
func (c *RejoinClient) tryRejoinWithAvailableServices(ctx context.Context, endpoints []string) (diskKey, measurementSecret []byte, err error) {
for _, endpoint := range endpoints {
c.log.With(zap.String("endpoint", endpoint)).Infof("Requesting rejoin ticket")
c.log.With(slog.String("endpoint", endpoint)).Info("Requesting rejoin ticket")
rejoinTicket, err := c.requestRejoinTicket(endpoint)
if err == nil {
return rejoinTicket.StateDiskKey, rejoinTicket.MeasurementSecret, nil
}
c.log.With(zap.Error(err), zap.String("endpoint", endpoint)).Warnf("Failed to rejoin on endpoint")
c.log.With(slog.Any("error", err), slog.String("endpoint", endpoint)).Warn("Failed to rejoin on endpoint")
// stop requesting additional endpoints if the context is done
select {
@ -115,7 +114,7 @@ func (c *RejoinClient) tryRejoinWithAvailableServices(ctx context.Context, endpo
default:
}
}
c.log.Errorf("Failed to rejoin on all endpoints")
c.log.Error("Failed to rejoin on all endpoints")
return nil, nil, errors.New("failed to join on all endpoints")
}

View file

@ -9,6 +9,7 @@ package rejoinclient
import (
"context"
"errors"
"log/slog"
"net"
"strconv"
"sync"
@ -56,7 +57,7 @@ func TestStartCancel(t *testing.T) {
dialer: dialer,
nodeInfo: metadata.InstanceMetadata{Role: role.Worker},
metadataAPI: metaAPI,
log: logger.NewTest(t),
log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)),
timeout: time.Second * 30,
interval: time.Second,
clock: clock,
@ -216,7 +217,7 @@ func TestGetJoinEndpoints(t *testing.T) {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
client := New(nil, tc.nodeInfo, tc.meta, logger.NewTest(t))
client := New(nil, tc.nodeInfo, tc.meta, slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)))
endpoints, err := client.getJoinEndpoints()
if tc.wantErr {
@ -292,7 +293,7 @@ func TestStart(t *testing.T) {
},
}
client := New(dialer, tc.nodeInfo, meta, logger.NewTest(t))
client := New(dialer, tc.nodeInfo, meta, slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)))
passphrase, secret := client.Start(context.Background(), "uuid")
assert.Equal(diskKey, passphrase)

View file

@ -17,6 +17,7 @@ import (
"errors"
"fmt"
"io/fs"
"log/slog"
"net"
"os"
"path/filepath"
@ -31,10 +32,8 @@ import (
"github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/crypto"
"github.com/edgelesssys/constellation/v2/internal/file"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/edgelesssys/constellation/v2/internal/nodestate"
"github.com/spf13/afero"
"go.uber.org/zap"
)
const (
@ -49,7 +48,7 @@ const (
// Manager handles formatting, mapping, mounting and unmounting of state disks.
type Manager struct {
log *logger.Logger
log *slog.Logger
csp string
diskPath string
fs afero.Afero
@ -60,7 +59,7 @@ type Manager struct {
}
// New initializes a SetupManager with the given parameters.
func New(log *logger.Logger, csp string, diskPath string, fs afero.Afero,
func New(log *slog.Logger, csp string, diskPath string, fs afero.Afero,
mapper DeviceMapper, mounter Mounter, openDevice vtpm.TPMOpenFunc,
) *Manager {
return &Manager{
@ -82,7 +81,7 @@ func (s *Manager) PrepareExistingDisk(recover RecoveryDoer) error {
if err != nil {
return err
}
s.log.With(zap.String("uuid", uuid)).Infof("Preparing existing state disk")
s.log.With(slog.String("uuid", uuid)).Info("Preparing existing state disk")
endpoint := net.JoinHostPort("0.0.0.0", strconv.Itoa(constants.RecoveryPort))
passphrase, measurementSecret, err := recover.Do(uuid, endpoint)
@ -128,7 +127,7 @@ func (s *Manager) PrepareExistingDisk(recover RecoveryDoer) error {
// PrepareNewDisk prepares an instances state disk by formatting the disk as a LUKS device using a random passphrase.
func (s *Manager) PrepareNewDisk() error {
uuid, _ := s.mapper.DiskUUID()
s.log.With(zap.String("uuid", uuid)).Infof("Preparing new state disk")
s.log.With(slog.String("uuid", uuid)).Info("Preparing new state disk")
// generate and save temporary passphrase
passphrase := make([]byte, crypto.RNGLengthDefault)
@ -192,12 +191,12 @@ func (s *Manager) LogDevices() error {
devices = append(devices, fileInfo)
}
s.log.Infof("List of all available block devices and partitions:")
s.log.Info("List of all available block devices and partitions:")
for _, device := range devices {
var stat syscall.Statfs_t
dev := "/dev/" + device.Name()
if err := syscall.Statfs(dev, &stat); err != nil {
s.log.With(zap.Error(err)).Errorf("failed to statfs %s", dev)
s.log.With(slog.Any("error", err)).Error("failed to statfs %s", dev)
continue
}
@ -206,7 +205,7 @@ func (s *Manager) LogDevices() error {
free := stat.Bfree * uint64(stat.Bsize)
avail := stat.Bavail * uint64(stat.Bsize)
s.log.Infof(
s.log.Info(
"Name: %-15s, Size: %-10d, Mode: %s, ModTime: %s, Size = %-10d, Free = %-10d, Available = %-10d\n",
dev,
device.Size(),

View file

@ -11,6 +11,7 @@ import (
"errors"
"io"
"io/fs"
"log/slog"
"net"
"path/filepath"
"sync"
@ -136,7 +137,7 @@ func TestPrepareExistingDisk(t *testing.T) {
}
setupManager := &Manager{
log: logger.NewTest(t),
log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)),
csp: "test",
diskPath: "disk-path",
fs: fs,
@ -214,7 +215,7 @@ func TestPrepareNewDisk(t *testing.T) {
assert := assert.New(t)
setupManager := &Manager{
log: logger.NewTest(t),
log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)),
csp: "test",
diskPath: "disk-path",
fs: tc.fs,
@ -270,7 +271,7 @@ func TestReadMeasurementSalt(t *testing.T) {
require.NoError(handler.WriteJSON("test-state.json", state, file.OptMkdirAll))
}
setupManager := New(logger.NewTest(t), "test", "disk-path", fs, nil, nil, nil)
setupManager := New(slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), "test", "disk-path", fs, nil, nil, nil)
measurementSalt, err := setupManager.readMeasurementSalt("test-state.json")
if tc.wantErr {

View file

@ -12,11 +12,10 @@ import (
"fmt"
"math"
"testing"
"log/slog"
"github.com/edgelesssys/constellation/v2/disk-mapper/internal/diskencryption"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/martinjungblut/go-cryptsetup"
"go.uber.org/zap/zapcore"
)
func BenchmarkMapper(b *testing.B) {
@ -39,7 +38,7 @@ func BenchmarkMapper(b *testing.B) {
}
passphrase := "benchmark"
mapper, free, err := diskencryption.New(testPath, logger.New(logger.PlainLog, zapcore.InfoLevel))
mapper, free, err := diskencryption.New(testPath, slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelInfo})))
if err != nil {
b.Fatal("Failed to create mapper:", err)
}

View file

@ -18,11 +18,11 @@ import (
"strings"
"syscall"
"testing"
"log/slog"
"github.com/bazelbuild/rules_go/go/runfiles"
"github.com/edgelesssys/constellation/v2/disk-mapper/internal/diskencryption"
ccryptsetup "github.com/edgelesssys/constellation/v2/internal/cryptsetup"
"github.com/edgelesssys/constellation/v2/internal/logger"
cryptsetup "github.com/martinjungblut/go-cryptsetup"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@ -103,7 +103,7 @@ func TestMapper(t *testing.T) {
require.NoError(setup(1), "failed to setup test disk")
defer func() { require.NoError(teardown(), "failed to delete test disk") }()
mapper, free, err := diskencryption.New(devicePath, logger.NewTest(t))
mapper, free, err := diskencryption.New(devicePath, slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)))
require.NoError(err, "failed to initialize crypt device")
defer free()

View file

@ -21,7 +21,7 @@ import (
func servicesVersion(t *testing.T) (semver.Semver, error) {
t.Helper()
log := logger.NewTest(t)
log := slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil))
settings := cli.New()
settings.KubeConfig = "constellation-admin.conf"
actionConfig := &action.Configuration{}

View file

@ -12,21 +12,19 @@ import (
"encoding/json"
"flag"
"fmt"
"log/slog"
"net"
"os"
"strings"
"github.com/edgelesssys/constellation/v2/internal/attestation/variant"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
"github.com/edgelesssys/constellation/v2/internal/grpc/dialer"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/edgelesssys/constellation/v2/joinservice/joinproto"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
func main() {
log := logger.New(logger.JSONLog, zapcore.DebugLevel)
defer log.Sync()
log := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug}))
jsEndpoint := flag.String("js-endpoint", "", "Join service endpoint to use.")
csp := flag.String("csp", "", "Cloud service provider to use.")
@ -38,13 +36,13 @@ func main() {
)
flag.Parse()
log.With(
zap.String("js-endpoint", *jsEndpoint),
zap.String("csp", *csp),
zap.String("variant", *attVariant),
).Infof("Running tests with flags")
slog.String("js-endpoint", *jsEndpoint),
slog.String("csp", *csp),
slog.String("variant", *attVariant),
).Info("Running tests with flags")
testCases := map[string]struct {
fn func(attVariant, csp, jsEndpoint string, log *logger.Logger) error
fn func(attVariant, csp, jsEndpoint string, log *slog.Logger) error
wantErr bool
}{
"JoinFromUnattestedNode": {
@ -58,44 +56,45 @@ func main() {
TestCases: make(map[string]testCaseOutput),
}
for name, tc := range testCases {
log.With(zap.String("testcase", name)).Infof("Running testcase")
log.With(slog.String("testcase", name)).Info("Running testcase")
err := tc.fn(*attVariant, *csp, *jsEndpoint, log)
switch {
case err == nil && tc.wantErr:
log.With(zap.Error(err), zap.String("testcase", name)).Errorf("Test case failed: Expected error but got none")
log.With(slog.Any("error", err), slog.String("testcase", name)).Error("Test case failed: Expected error but got none")
testOutput.TestCases[name] = testCaseOutput{
Passed: false,
Message: "Expected error but got none",
}
allPassed = false
case !tc.wantErr && err != nil:
log.With(zap.Error(err), zap.String("testcase", name)).Errorf("Test case failed: Got unexpected error")
log.With(slog.Any("error", err), slog.String("testcase", name)).Error("Test case failed: Got unexpected error")
testOutput.TestCases[name] = testCaseOutput{
Passed: false,
Message: fmt.Sprintf("Got unexpected error: %s", err),
}
allPassed = false
case tc.wantErr && err != nil:
log.With(zap.String("testcase", name)).Infof("Test case succeeded")
log.With(slog.String("testcase", name)).Info("Test case succeeded")
testOutput.TestCases[name] = testCaseOutput{
Passed: true,
Message: fmt.Sprintf("Got expected error: %s", err),
}
case !tc.wantErr && err == nil:
log.With(zap.String("testcase", name)).Infof("Test case succeeded")
log.With(slog.String("testcase", name)).Info("Test case succeeded")
testOutput.TestCases[name] = testCaseOutput{
Passed: true,
Message: "No error, as expected",
}
default:
log.With(zap.String("testcase", name)).Fatalf("invalid result")
log.With(slog.String("testcase", name)).Error("invalid result")
os.Exit(1)
}
}
testOutput.AllPassed = allPassed
log.With(zap.Any("result", testOutput)).Infof("Test completed")
log.With(slog.Any("result", testOutput)).Info("Test completed")
}
type testOutput struct {
@ -110,7 +109,7 @@ type testCaseOutput struct {
// JoinFromUnattestedNode simulates a join request from a Node that uses a stub issuer
// and thus cannot be attested correctly.
func JoinFromUnattestedNode(attVariant, csp, jsEndpoint string, log *logger.Logger) error {
func JoinFromUnattestedNode(attVariant, csp, jsEndpoint string, log *slog.Logger) error {
joiner, err := newMaliciousJoiner(attVariant, csp, jsEndpoint, log)
if err != nil {
return fmt.Errorf("creating malicious joiner: %w", err)
@ -125,7 +124,7 @@ func JoinFromUnattestedNode(attVariant, csp, jsEndpoint string, log *logger.Logg
// newMaliciousJoiner creates a new malicious joiner, i.e. a simulated node that issues
// an invalid join request.
func newMaliciousJoiner(attVariant, csp, endpoint string, log *logger.Logger) (*maliciousJoiner, error) {
func newMaliciousJoiner(attVariant, csp, endpoint string, log *slog.Logger) (*maliciousJoiner, error) {
var attVariantOid variant.Variant
var err error
if strings.EqualFold(attVariant, "default") {
@ -149,30 +148,30 @@ func newMaliciousJoiner(attVariant, csp, endpoint string, log *logger.Logger) (*
// maliciousJoiner simulates a malicious node joining a cluster.
type maliciousJoiner struct {
endpoint string
logger *logger.Logger
logger *slog.Logger
dialer *dialer.Dialer
}
// join issues a join request to the join service endpoint.
func (j *maliciousJoiner) join(ctx context.Context) (*joinproto.IssueJoinTicketResponse, error) {
j.logger.Debugf("Dialing join service endpoint %s", j.endpoint)
j.logger.Debug("Dialing join service endpoint %s", j.endpoint, "")
conn, err := j.dialer.Dial(ctx, j.endpoint)
if err != nil {
return nil, fmt.Errorf("dialing join service endpoint: %w", err)
}
defer conn.Close()
j.logger.Debugf("Successfully dialed join service endpoint %s", j.endpoint)
j.logger.Debug("Successfully dialed join service endpoint %s", j.endpoint, "")
protoClient := joinproto.NewAPIClient(conn)
j.logger.Debugf("Issuing join ticket")
j.logger.Debug("Issuing join ticket")
req := &joinproto.IssueJoinTicketRequest{
DiskUuid: "",
CertificateRequest: []byte{},
IsControlPlane: false,
}
res, err := protoClient.IssueJoinTicket(ctx, req)
j.logger.Debugf("Got join ticket response: %+v", res)
j.logger.Debug("Got join ticket response: %s", fmt.Sprintf("%+v", res), "")
if err != nil {
return nil, fmt.Errorf("issuing join ticket: %w", err)
}

View file

@ -9,14 +9,14 @@ package main
import (
"context"
"errors"
"log/slog"
"os"
"github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/bazelfiles"
"github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/issues"
"github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/mirror"
"github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/rules"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/spf13/cobra"
"go.uber.org/zap/zapcore"
)
func newCheckCmd() *cobra.Command {
@ -38,15 +38,15 @@ func runCheck(cmd *cobra.Command, _ []string) error {
if err != nil {
return err
}
log := logger.New(logger.PlainLog, flags.logLevel)
log.Debugf("Parsed flags: %+v", flags)
log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel}))
log.Debug("Parsed flags: %+v", flags)
filesHelper, err := bazelfiles.New()
if err != nil {
return err
}
log.Debugf("Searching for Bazel files in the current WORKSPACE and all subdirectories...")
log.Debug("Searching for Bazel files in the current WORKSPACE and all subdirectories...")
bazelFiles, err := filesHelper.FindFiles()
if err != nil {
return err
@ -55,10 +55,10 @@ func runCheck(cmd *cobra.Command, _ []string) error {
var mirrorCheck mirrorChecker
switch {
case flags.mirrorUnauthenticated:
log.Debugf("Checking consistency of all referenced CAS objects without authentication.")
log.Debug("Checking consistency of all referenced CAS objects without authentication.")
mirrorCheck = mirror.NewUnauthenticated(flags.mirrorBaseURL, mirror.Run, log)
case flags.mirror:
log.Debugf("Checking consistency of all referenced CAS objects using AWS S3.")
log.Debug("Checking consistency of all referenced CAS objects using AWS S3.")
mirrorCheck, err = mirror.New(cmd.Context(), flags.region, flags.bucket, flags.mirrorBaseURL, mirror.Run, log)
if err != nil {
return err
@ -78,17 +78,17 @@ func runCheck(cmd *cobra.Command, _ []string) error {
}
}
if len(iss) > 0 {
log.Infof("Found issues in rules")
log.Info("Found issues in rules")
iss.Report(cmd.OutOrStdout())
return errors.New("found issues in rules")
}
log.Infof("No issues found 🦭")
log.Info("No issues found 🦭")
return nil
}
func checkBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorCheck mirrorChecker, bazelFile bazelfiles.BazelFile, log *logger.Logger) (issByFile issues.ByFile, err error) {
log.Debugf("Checking file: %s", bazelFile.RelPath)
func checkBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorCheck mirrorChecker, bazelFile bazelfiles.BazelFile, log *slog.Logger) (issByFile issues.ByFile, err error) {
log.Debug("Checking file: %s", bazelFile.RelPath)
issByFile = issues.NewByFile()
buildfile, err := fileHelper.LoadFile(bazelFile)
if err != nil {
@ -96,12 +96,12 @@ func checkBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorCh
}
found := rules.Rules(buildfile, rules.SupportedRules)
if len(found) == 0 {
log.Debugf("No rules found in file: %s", bazelFile.RelPath)
log.Debug("No rules found in file: %s", bazelFile.RelPath)
return issByFile, nil
}
log.Debugf("Found %d rules in file: %s", len(found), bazelFile.RelPath)
log.Debug("Found %d rules in file: %s", len(found), bazelFile.RelPath)
for _, rule := range found {
log.Debugf("Checking rule: %s", rule.Name())
log.Debug("Checking rule: %s", rule.Name())
// check if the rule is a valid pinned dependency rule (has all required attributes)
if issues := rules.ValidatePinned(rule); len(issues) > 0 {
issByFile.Add(rule.Name(), issues...)
@ -130,7 +130,7 @@ type checkFlags struct {
region string
bucket string
mirrorBaseURL string
logLevel zapcore.Level
logLevel slog.Level
}
func parseCheckFlags(cmd *cobra.Command) (checkFlags, error) {
@ -146,9 +146,9 @@ func parseCheckFlags(cmd *cobra.Command) (checkFlags, error) {
if err != nil {
return checkFlags{}, err
}
logLevel := zapcore.InfoLevel
logLevel := slog.LevelInfo
if verbose {
logLevel = zapcore.DebugLevel
logLevel = slog.LevelDebug
}
region, err := cmd.Flags().GetString("region")
if err != nil {

View file

@ -9,15 +9,15 @@ package main
import (
"context"
"errors"
"log/slog"
"os"
"github.com/bazelbuild/buildtools/build"
"github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/bazelfiles"
"github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/issues"
"github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/mirror"
"github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/rules"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/spf13/cobra"
"go.uber.org/zap/zapcore"
)
func newFixCmd() *cobra.Command {
@ -38,15 +38,15 @@ func runFix(cmd *cobra.Command, _ []string) error {
if err != nil {
return err
}
log := logger.New(logger.PlainLog, flags.logLevel)
log.Debugf("Parsed flags: %+v", flags)
log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel}))
log.Debug("Parsed flags: %+v", flags)
fileHelper, err := bazelfiles.New()
if err != nil {
return err
}
log.Debugf("Searching for Bazel files in the current WORKSPACE and all subdirectories...")
log.Debug("Searching for Bazel files in the current WORKSPACE and all subdirectories...")
bazelFiles, err := fileHelper.FindFiles()
if err != nil {
return err
@ -55,10 +55,10 @@ func runFix(cmd *cobra.Command, _ []string) error {
var mirrorUpload mirrorUploader
switch {
case flags.unauthenticated:
log.Warnf("Fixing rules without authentication for AWS S3. If artifacts are not yet mirrored, this will fail.")
log.Warn("Fixing rules without authentication for AWS S3. If artifacts are not yet mirrored, this will fail.")
mirrorUpload = mirror.NewUnauthenticated(flags.mirrorBaseURL, flags.dryRun, log)
default:
log.Debugf("Fixing rules with authentication for AWS S3.")
log.Debug("Fixing rules with authentication for AWS S3.")
mirrorUpload, err = mirror.New(cmd.Context(), flags.region, flags.bucket, flags.mirrorBaseURL, flags.dryRun, log)
if err != nil {
return err
@ -76,29 +76,29 @@ func runFix(cmd *cobra.Command, _ []string) error {
}
}
if len(issues) > 0 {
log.Warnf("Found %d unfixable issues in rules", len(issues))
log.Warn("Found %d unfixable issues in rules", len(issues))
issues.Report(cmd.OutOrStdout())
return errors.New("found issues in rules")
}
log.Infof("No unfixable issues found")
log.Info("No unfixable issues found")
return nil
}
func fixBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorUpload mirrorUploader, bazelFile bazelfiles.BazelFile, dryRun bool, log *logger.Logger) (iss issues.ByFile, err error) {
func fixBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorUpload mirrorUploader, bazelFile bazelfiles.BazelFile, dryRun bool, log *slog.Logger) (iss issues.ByFile, err error) {
iss = issues.NewByFile()
var changed bool // true if any rule in this file was changed
log.Infof("Checking file: %s", bazelFile.RelPath)
log.Info("Checking file: %s", bazelFile.RelPath)
buildfile, err := fileHelper.LoadFile(bazelFile)
if err != nil {
return iss, err
}
found := rules.Rules(buildfile, rules.SupportedRules)
if len(found) == 0 {
log.Debugf("No rules found in file: %s", bazelFile.RelPath)
log.Debug("No rules found in file: %s", bazelFile.RelPath)
return iss, nil
}
log.Debugf("Found %d rules in file: %s", len(found), bazelFile.RelPath)
log.Debug("Found %d rules in file: %s", len(found), bazelFile.RelPath)
for _, rule := range found {
changedRule, ruleIssues := fixRule(ctx, mirrorUpload, rule, log)
if len(ruleIssues) > 0 {
@ -108,11 +108,11 @@ func fixBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorUplo
}
if len(iss) > 0 {
log.Warnf("File %s has issues. Not saving!", bazelFile.RelPath)
log.Warn("File %s has issues. Not saving!", bazelFile.RelPath)
return iss, nil
}
if !changed {
log.Debugf("No changes to file: %s", bazelFile.RelPath)
log.Debug("No changes to file: %s", bazelFile.RelPath)
return iss, nil
}
if dryRun {
@ -120,10 +120,10 @@ func fixBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorUplo
if err != nil {
return iss, err
}
log.Infof("Dry run: would save updated file %s with diff:\n%s", bazelFile.RelPath, diff)
log.Info("Dry run: would save updated file %s with diff:\n%s", bazelFile.RelPath, diff)
return iss, nil
}
log.Infof("Saving updated file: %s", bazelFile.RelPath)
log.Info("Saving updated file: %s", bazelFile.RelPath)
if err := fileHelper.WriteFile(bazelFile, buildfile); err != nil {
return iss, err
}
@ -131,7 +131,7 @@ func fixBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorUplo
return iss, nil
}
func learnHashForRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.Rule, log *logger.Logger) error {
func learnHashForRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.Rule, log *slog.Logger) error {
upstreamURLs, err := rules.UpstreamURLs(rule)
if err != nil {
return err
@ -141,12 +141,12 @@ func learnHashForRule(ctx context.Context, mirrorUpload mirrorUploader, rule *bu
return err
}
rules.SetHash(rule, learnedHash)
log.Debugf("Learned hash for rule %s: %s", rule.Name(), learnedHash)
log.Debug("Learned hash for rule %s: %s", rule.Name(), learnedHash)
return nil
}
func fixRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.Rule, log *logger.Logger) (changed bool, iss []error) {
log.Debugf("Fixing rule: %s", rule.Name())
func fixRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.Rule, log *slog.Logger) (changed bool, iss []error) {
log.Debug("Fixing rule: %s", rule.Name())
// try to learn the hash
if hash, err := rules.GetHash(rule); err != nil || hash == "" {
@ -182,14 +182,14 @@ func fixRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.Rule,
}
if checkErr := mirrorUpload.Check(ctx, expectedHash); checkErr != nil {
log.Infof("Artifact %s with hash %s is not yet mirrored. Uploading...", rule.Name(), expectedHash)
log.Info("Artifact %s with hash %s is not yet mirrored. Uploading...", rule.Name(), expectedHash)
if uploadErr := mirrorUpload.Mirror(ctx, expectedHash, rules.GetURLs(rule)); uploadErr != nil {
// don't try to fix the rule if the upload failed
iss = append(iss, uploadErr)
return changed, iss
}
} else {
log.Infof("Artifact %s with hash %s was already uploaded before. Adding to rule...", rule.Name(), expectedHash)
log.Info("Artifact %s with hash %s was already uploaded before. Adding to rule...", rule.Name(), expectedHash)
}
// now the artifact is mirrored (if it wasn't already) and we can fix the rule
@ -211,7 +211,7 @@ type fixFlags struct {
region string
bucket string
mirrorBaseURL string
logLevel zapcore.Level
logLevel slog.Level
}
func parseFixFlags(cmd *cobra.Command) (fixFlags, error) {
@ -227,9 +227,9 @@ func parseFixFlags(cmd *cobra.Command) (fixFlags, error) {
if err != nil {
return fixFlags{}, err
}
logLevel := zapcore.InfoLevel
logLevel := slog.LevelInfo
if verbose {
logLevel = zapcore.DebugLevel
logLevel = slog.LevelDebug
}
region, err := cmd.Flags().GetString("region")
if err != nil {

View file

@ -135,7 +135,7 @@ func TestMirror(t *testing.T) {
}(),
},
unauthenticated: tc.unauthenticated,
log: logger.NewTest(t),
log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)),
}
err := m.Mirror(context.Background(), tc.hash, []string{tc.upstreamURL})
if tc.wantErr {
@ -178,7 +178,7 @@ func TestLearn(t *testing.T) {
body: tc.upstreamResponse,
},
},
log: logger.NewTest(t),
log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)),
}
gotHash, err := m.Learn(context.Background(), []string{"https://example.com/foo"})
if tc.wantErr {
@ -272,7 +272,7 @@ func TestCheck(t *testing.T) {
response: tc.authenticatedResponse,
err: tc.authenticatedErr,
},
log: logger.NewTest(t),
log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)),
}
err := m.Check(context.Background(), tc.hash)
if tc.wantErr {

View file

@ -9,15 +9,15 @@ package main
import (
"context"
"errors"
"log/slog"
"os"
"github.com/bazelbuild/buildtools/build"
"github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/bazelfiles"
"github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/issues"
"github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/mirror"
"github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/rules"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/spf13/cobra"
"go.uber.org/zap/zapcore"
)
func newUpgradeCmd() *cobra.Command {
@ -38,15 +38,15 @@ func runUpgrade(cmd *cobra.Command, _ []string) error {
if err != nil {
return err
}
log := logger.New(logger.PlainLog, flags.logLevel)
log.Debugf("Parsed flags: %+v", flags)
log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel}))
log.Debug("Parsed flags: %+v", flags)
fileHelper, err := bazelfiles.New()
if err != nil {
return err
}
log.Debugf("Searching for Bazel files in the current WORKSPACE and all subdirectories...")
log.Debug("Searching for Bazel files in the current WORKSPACE and all subdirectories...")
bazelFiles, err := fileHelper.FindFiles()
if err != nil {
return err
@ -55,10 +55,10 @@ func runUpgrade(cmd *cobra.Command, _ []string) error {
var mirrorUpload mirrorUploader
switch {
case flags.unauthenticated:
log.Warnf("Upgrading rules without authentication for AWS S3. If artifacts are not yet mirrored, this will fail.")
log.Warn("Upgrading rules without authentication for AWS S3. If artifacts are not yet mirrored, this will fail.")
mirrorUpload = mirror.NewUnauthenticated(flags.mirrorBaseURL, flags.dryRun, log)
default:
log.Debugf("Upgrading rules with authentication for AWS S3.")
log.Debug("Upgrading rules with authentication for AWS S3.")
mirrorUpload, err = mirror.New(cmd.Context(), flags.region, flags.bucket, flags.mirrorBaseURL, flags.dryRun, log)
if err != nil {
return err
@ -76,29 +76,29 @@ func runUpgrade(cmd *cobra.Command, _ []string) error {
}
}
if len(issues) > 0 {
log.Warnf("Found %d issues in rules", len(issues))
log.Warn("Found %d issues in rules", len(issues))
issues.Report(cmd.OutOrStdout())
return errors.New("found issues in rules")
}
log.Infof("No issues found")
log.Info("No issues found")
return nil
}
func upgradeBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorUpload mirrorUploader, bazelFile bazelfiles.BazelFile, dryRun bool, log *logger.Logger) (iss issues.ByFile, err error) {
func upgradeBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorUpload mirrorUploader, bazelFile bazelfiles.BazelFile, dryRun bool, log *slog.Logger) (iss issues.ByFile, err error) {
iss = issues.NewByFile()
var changed bool // true if any rule in this file was changed
log.Infof("Checking file: %s", bazelFile.RelPath)
log.Info("Checking file: %s", bazelFile.RelPath)
buildfile, err := fileHelper.LoadFile(bazelFile)
if err != nil {
return iss, err
}
found := rules.Rules(buildfile, rules.SupportedRules)
if len(found) == 0 {
log.Debugf("No rules found in file: %s", bazelFile.RelPath)
log.Debug("No rules found in file: %s", bazelFile.RelPath)
return iss, nil
}
log.Debugf("Found %d rules in file: %s", len(found), bazelFile.RelPath)
log.Debug("Found %d rules in file: %s", len(found), bazelFile.RelPath)
for _, rule := range found {
changedRule, ruleIssues := upgradeRule(ctx, mirrorUpload, rule, log)
if len(ruleIssues) > 0 {
@ -108,11 +108,11 @@ func upgradeBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirror
}
if len(iss) > 0 {
log.Warnf("File %s has issues. Not saving!", bazelFile.RelPath)
log.Warn("File %s has issues. Not saving!", bazelFile.RelPath)
return iss, nil
}
if !changed {
log.Debugf("No changes to file: %s", bazelFile.RelPath)
log.Debug("No changes to file: %s", bazelFile.RelPath)
return iss, nil
}
if dryRun {
@ -120,10 +120,10 @@ func upgradeBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirror
if err != nil {
return iss, err
}
log.Infof("Dry run: would save updated file %s with diff:\n%s", bazelFile.RelPath, diff)
log.Info("Dry run: would save updated file %s with diff:\n%s", bazelFile.RelPath, diff)
return iss, nil
}
log.Infof("Saving updated file: %s", bazelFile.RelPath)
log.Info("Saving updated file: %s", bazelFile.RelPath)
if err := fileHelper.WriteFile(bazelFile, buildfile); err != nil {
return iss, err
}
@ -131,12 +131,12 @@ func upgradeBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirror
return iss, nil
}
func upgradeRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.Rule, log *logger.Logger) (changed bool, iss []error) {
log.Debugf("Upgrading rule: %s", rule.Name())
func upgradeRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.Rule, log *slog.Logger) (changed bool, iss []error) {
log.Debug("Upgrading rule: %s", rule.Name())
upstreamURLs, err := rules.UpstreamURLs(rule)
if errors.Is(err, rules.ErrNoUpstreamURL) {
log.Debugf("Rule has no upstream URL. Skipping.")
log.Debug("Rule has no upstream URL. Skipping.")
return false, nil
} else if err != nil {
iss = append(iss, err)
@ -152,7 +152,7 @@ func upgradeRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.R
existingHash, err := rules.GetHash(rule)
if err == nil && learnedHash == existingHash {
log.Debugf("Rule already upgraded. Skipping.")
log.Debug("Rule already upgraded. Skipping.")
return false, nil
}
@ -177,7 +177,7 @@ type upgradeFlags struct {
region string
bucket string
mirrorBaseURL string
logLevel zapcore.Level
logLevel slog.Level
}
func parseUpgradeFlags(cmd *cobra.Command) (upgradeFlags, error) {
@ -193,9 +193,9 @@ func parseUpgradeFlags(cmd *cobra.Command) (upgradeFlags, error) {
if err != nil {
return upgradeFlags{}, err
}
logLevel := zapcore.InfoLevel
logLevel := slog.LevelInfo
if verbose {
logLevel = zapcore.DebugLevel
logLevel = slog.LevelDebug
}
region, err := cmd.Flags().GetString("region")
if err != nil {

View file

@ -8,8 +8,10 @@ SPDX-License-Identifier: AGPL-3.0-only
package main
import (
"context"
"flag"
"context"
"flag"
"log/slog"
"os"
"github.com/edgelesssys/constellation/v2/internal/api/versionsapi"
"github.com/edgelesssys/constellation/v2/internal/constants"
@ -19,36 +21,39 @@ import (
)
var (
refFlag = flag.String("ref", "", "the reference name of the image")
streamFlag = flag.String("stream", "", "the stream name of the image")
versionFlag = flag.String("version", "", "the version of the image")
refFlag = flag.String("ref", "", "the reference name of the image")
streamFlag = flag.String("stream", "", "the stream name of the image")
versionFlag = flag.String("version", "", "the version of the image")
)
func main() {
log := logger.New(logger.PlainLog, zapcore.DebugLevel)
ctx := context.Background()
log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug}))
ctx := context.Background()
flag.Parse()
if *refFlag == "" {
log.Fatalf("ref must be set")
}
if *streamFlag == "" {
log.Fatalf("stream must be set")
}
if *versionFlag == "" {
log.Fatalf("version must be set")
}
flag.Parse()
if *refFlag == "" {
log.Error("ref must be set")
os.Exit(1)
}
if *streamFlag == "" {
log.Error("stream must be set")
os.Exit(1)
}
if *versionFlag == "" {
log.Error("version must be set")
os.Exit(1)
}
cliInfo := versionsapi.CLIInfo{
Ref: *refFlag,
Stream: *streamFlag,
Version: *versionFlag,
Kubernetes: []string{},
}
cliInfo := versionsapi.CLIInfo{
Ref: *refFlag,
Stream: *streamFlag,
Version: *versionFlag,
Kubernetes: []string{},
}
for _, v := range versions.VersionConfigs {
cliInfo.Kubernetes = append(cliInfo.Kubernetes, v.ClusterVersion)
}
for _, v := range versions.VersionConfigs {
cliInfo.Kubernetes = append(cliInfo.Kubernetes, v.ClusterVersion)
}
c, cclose, err := versionsapi.NewClient(ctx, "eu-central-1", "cdn-constellation-backend", constants.CDNDefaultDistributionID, false, log)
if err != nil {
@ -60,7 +65,8 @@ func main() {
}
}()
if err := c.UpdateCLIInfo(ctx, cliInfo); err != nil {
log.Fatalf("updating cli info: %w", err)
}
if err := c.UpdateCLIInfo(ctx, cliInfo); err != nil {
log.Error("updating cli info", slog.Any("error", err))
os.Exit(1)
}
}
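
Note on the slog calls in this change: slog.Logger methods do not interpret printf verbs (and %w is only meaningful inside fmt.Errorf), so messages are either pre-rendered with fmt.Sprintf or carry their values as key-value attributes. A minimal, self-contained sketch of both forms (the message text and error are illustrative only):

package main

import (
	"errors"
	"fmt"
	"log/slog"
	"os"
)

func main() {
	log := slog.New(slog.NewTextHandler(os.Stdout, nil))
	err := errors.New("connection refused")

	// Pre-rendered message: keeps the exact wording of the old zap call.
	log.Error(fmt.Sprintf("updating cli info: %v", err))

	// Structured attribute: the error becomes a queryable field instead of part of the message.
	log.Error("updating cli info", slog.Any("error", err))
}
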

View file

@ -8,15 +8,14 @@ package main
import (
"fmt"
"io"
"log/slog"
"os"
"path/filepath"
"strings"
"github.com/edgelesssys/constellation/v2/hack/oci-pin/internal/extract"
"github.com/edgelesssys/constellation/v2/hack/oci-pin/internal/inject"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/spf13/cobra"
"go.uber.org/zap/zapcore"
)
func newCodegenCmd() *cobra.Command {
@ -44,15 +43,15 @@ func runCodegen(cmd *cobra.Command, _ []string) error {
if err != nil {
return err
}
log := logger.New(logger.PlainLog, flags.logLevel)
log.Debugf("Parsed flags: %+v", flags)
log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel}))
log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
registry, prefix, name, tag, err := splitRepoTag(flags.imageRepoTag)
if err != nil {
return fmt.Errorf("splitting OCI image reference %q: %w", flags.imageRepoTag, err)
}
log.Debugf("Generating Go code for OCI image %s.", name)
log.Debug(fmt.Sprintf("Generating Go code for OCI image %s.", name))
ociIndexPath := filepath.Join(flags.ociPath, "index.json")
index, err := os.Open(ociIndexPath)
@ -78,7 +77,7 @@ func runCodegen(cmd *cobra.Command, _ []string) error {
return err
}
log.Debugf("OCI image digest: %s", digest)
log.Debug(fmt.Sprintf("OCI image digest: %s", digest))
if err := inject.Render(out, inject.PinningValues{
Package: flags.pkg,
@ -92,7 +91,7 @@ func runCodegen(cmd *cobra.Command, _ []string) error {
return fmt.Errorf("rendering Go code: %w", err)
}
log.Debugf("Go code created at %q 🤖", flags.output)
log.Debug(fmt.Sprintf("Go code created at %q 🤖", flags.output))
return nil
}
@ -102,7 +101,7 @@ type codegenFlags struct {
pkg string
identifier string
imageRepoTag string
logLevel zapcore.Level
logLevel slog.Level
}
func parseCodegenFlags(cmd *cobra.Command) (codegenFlags, error) {
@ -137,9 +136,9 @@ func parseCodegenFlags(cmd *cobra.Command) (codegenFlags, error) {
if err != nil {
return codegenFlags{}, err
}
logLevel := zapcore.InfoLevel
logLevel := slog.LevelInfo
if verbose {
logLevel = zapcore.DebugLevel
logLevel = slog.LevelDebug
}
return codegenFlags{

View file

@ -8,12 +8,11 @@ package main
import (
"fmt"
"io"
"log/slog"
"os"
"github.com/edgelesssys/constellation/v2/hack/oci-pin/internal/sums"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/spf13/cobra"
"go.uber.org/zap/zapcore"
)
func newMergeCmd() *cobra.Command {
@ -35,10 +34,10 @@ func runMerge(cmd *cobra.Command, _ []string) error {
if err != nil {
return err
}
log := logger.New(logger.PlainLog, flags.logLevel)
log.Debugf("Parsed flags: %+v", flags)
log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel}))
log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
log.Debugf("Merging sum file from %q into %q.", flags.inputs, flags.output)
log.Debug(fmt.Sprintf("Merging sum file from %q into %q.", flags.inputs, flags.output))
var out io.Writer
if flags.output == "-" {
@ -61,7 +60,7 @@ func runMerge(cmd *cobra.Command, _ []string) error {
return fmt.Errorf("creating merged sum file: %w", err)
}
log.Debugf("Sum file created at %q 🤖", flags.output)
log.Debug(fmt.Sprintf("Sum file created at %q 🤖", flags.output))
return nil
}
@ -93,7 +92,7 @@ func parseInput(input string) ([]sums.PinnedImageReference, error) {
type mergeFlags struct {
inputs []string
output string
logLevel zapcore.Level
logLevel slog.Level
}
func parseMergeFlags(cmd *cobra.Command) (mergeFlags, error) {
@ -109,9 +108,9 @@ func parseMergeFlags(cmd *cobra.Command) (mergeFlags, error) {
if err != nil {
return mergeFlags{}, err
}
logLevel := zapcore.InfoLevel
logLevel := slog.LevelInfo
if verbose {
logLevel = zapcore.DebugLevel
logLevel = slog.LevelDebug
}
return mergeFlags{

View file

@ -8,15 +8,14 @@ package main
import (
"fmt"
"io"
"log/slog"
"os"
"path/filepath"
"strings"
"github.com/edgelesssys/constellation/v2/hack/oci-pin/internal/extract"
"github.com/edgelesssys/constellation/v2/hack/oci-pin/internal/sums"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/spf13/cobra"
"go.uber.org/zap/zapcore"
)
func newSumCmd() *cobra.Command {
@ -41,15 +40,15 @@ func runSum(cmd *cobra.Command, _ []string) error {
if err != nil {
return err
}
log := logger.New(logger.PlainLog, flags.logLevel)
log.Debugf("Parsed flags: %+v", flags)
log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel}))
log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
registry, prefix, name, tag, err := splitRepoTag(flags.imageRepoTag)
if err != nil {
return fmt.Errorf("splitting repo tag: %w", err)
}
log.Debugf("Generating sum file for OCI image %s.", name)
log.Debug(fmt.Sprintf("Generating sum file for OCI image %s.", name))
ociIndexPath := filepath.Join(flags.ociPath, "index.json")
index, err := os.Open(ociIndexPath)
@ -75,7 +74,7 @@ func runSum(cmd *cobra.Command, _ []string) error {
return fmt.Errorf("extracting OCI image digest: %w", err)
}
log.Debugf("OCI image digest: %s", digest)
log.Debug(fmt.Sprintf("OCI image digest: %s", digest))
refs := []sums.PinnedImageReference{
{
@ -91,7 +90,7 @@ func runSum(cmd *cobra.Command, _ []string) error {
return fmt.Errorf("creating sum file: %w", err)
}
log.Debugf("Sum file created at %q 🤖", flags.output)
log.Debug(fmt.Sprintf("Sum file created at %q 🤖", flags.output))
return nil
}
@ -99,7 +98,7 @@ type sumFlags struct {
ociPath string
output string
imageRepoTag string
logLevel zapcore.Level
logLevel slog.Level
}
func parseSumFlags(cmd *cobra.Command) (sumFlags, error) {
@ -126,9 +125,9 @@ func parseSumFlags(cmd *cobra.Command) (sumFlags, error) {
if err != nil {
return sumFlags{}, err
}
logLevel := zapcore.InfoLevel
logLevel := slog.LevelInfo
if verbose {
logLevel = zapcore.DebugLevel
logLevel = slog.LevelDebug
}
return sumFlags{

View file

@ -9,33 +9,34 @@ SPDX-License-Identifier: AGPL-3.0-only
package main
import (
"flag"
"flag"
"log/slog"
"os"
"github.com/edgelesssys/constellation/v2/hack/qemu-metadata-api/server"
"github.com/edgelesssys/constellation/v2/hack/qemu-metadata-api/virtwrapper"
"github.com/edgelesssys/constellation/v2/internal/logger"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"libvirt.org/go/libvirt"
"github.com/edgelesssys/constellation/v2/hack/qemu-metadata-api/server"
"github.com/edgelesssys/constellation/v2/hack/qemu-metadata-api/virtwrapper"
"libvirt.org/go/libvirt"
)
func main() {
bindPort := flag.String("port", "8080", "Port to bind to")
targetNetwork := flag.String("network", "constellation-network", "Name of the network in QEMU to use")
libvirtURI := flag.String("libvirt-uri", "qemu:///system", "URI of the libvirt connection")
initSecretHash := flag.String("initsecrethash", "", "brcypt hash of the init secret")
flag.Parse()
bindPort := flag.String("port", "8080", "Port to bind to")
targetNetwork := flag.String("network", "constellation-network", "Name of the network in QEMU to use")
libvirtURI := flag.String("libvirt-uri", "qemu:///system", "URI of the libvirt connection")
initSecretHash := flag.String("initsecrethash", "", "bcrypt hash of the init secret")
flag.Parse()
log := logger.New(logger.JSONLog, zapcore.InfoLevel)
log := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelInfo}))
conn, err := libvirt.NewConnect(*libvirtURI)
if err != nil {
log.With(zap.Error(err)).Fatalf("Failed to connect to libvirt")
}
defer conn.Close()
conn, err := libvirt.NewConnect(*libvirtURI)
if err != nil {
log.With(slog.Any("error", err)).Error("Failed to connect to libvirt")
os.Exit(1)
}
defer conn.Close()
serv := server.New(log, *targetNetwork, *initSecretHash, &virtwrapper.Connect{Conn: conn})
if err := serv.ListenAndServe(*bindPort); err != nil {
log.With(zap.Error(err)).Fatalf("Failed to serve")
}
serv := server.New(log, *targetNetwork, *initSecretHash, &virtwrapper.Connect{Conn: conn})
if err := serv.ListenAndServe(*bindPort); err != nil {
log.With(slog.Any("error", err)).Error("Failed to serve")
os.Exit(1)
}
}
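
slog has no Fatal level, so the zap Fatalf call sites above become Error followed by an explicit os.Exit(1), matching zap's behavior of exiting with a non-zero status. If that pair gets repetitive, a small helper can wrap it; the fatal function below is only a sketch and not part of this change:

package main

import (
	"log/slog"
	"os"
)

// fatal logs the message with optional attributes and terminates the process.
func fatal(log *slog.Logger, msg string, args ...any) {
	log.Error(msg, args...)
	os.Exit(1)
}

func main() {
	log := slog.New(slog.NewJSONHandler(os.Stdout, nil))
	if _, err := os.Open("/nonexistent"); err != nil {
		fatal(log, "Failed to open file", slog.Any("error", err))
	}
}
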

View file

@ -9,27 +9,27 @@ package server
import (
"encoding/json"
"fmt"
"io"
"log/slog"
"net"
"net/http"
"strings"
"github.com/edgelesssys/constellation/v2/hack/qemu-metadata-api/virtwrapper"
"github.com/edgelesssys/constellation/v2/internal/cloud/metadata"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/edgelesssys/constellation/v2/internal/role"
"go.uber.org/zap"
)
// Server that provides QEMU metadata.
type Server struct {
log *logger.Logger
log *slog.Logger
virt virConnect
network string
initSecretHashVal []byte
}
// New creates a new Server.
func New(log *logger.Logger, network, initSecretHash string, conn virConnect) *Server {
func New(log *slog.Logger, network, initSecretHash string, conn virConnect) *Server {
return &Server{
log: log,
virt: conn,
@ -55,25 +55,25 @@ func (s *Server) ListenAndServe(port string) error {
return err
}
s.log.Infof("Starting QEMU metadata API on %s", lis.Addr())
s.log.Info(fmt.Sprintf("Starting QEMU metadata API on %s", lis.Addr()))
return server.Serve(lis)
}
// listSelf returns peer information about the instance issuing the request.
func (s *Server) listSelf(w http.ResponseWriter, r *http.Request) {
log := s.log.With(zap.String("peer", r.RemoteAddr))
log.Infof("Serving GET request for /self")
log := s.log.With(slog.String("peer", r.RemoteAddr))
log.Info("Serving GET request for /self")
remoteIP, _, err := net.SplitHostPort(r.RemoteAddr)
if err != nil {
log.With(zap.Error(err)).Errorf("Failed to parse remote address")
log.With(slog.Any("error", err)).Error("Failed to parse remote address")
http.Error(w, fmt.Sprintf("Failed to parse remote address: %s\n", err), http.StatusInternalServerError)
return
}
peers, err := s.listAll()
if err != nil {
log.With(zap.Error(err)).Errorf("Failed to list peer metadata")
log.With(slog.Any("error", err)).Error("Failed to list peer metadata")
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
@ -85,23 +85,23 @@ func (s *Server) listSelf(w http.ResponseWriter, r *http.Request) {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
log.Infof("Request successful")
log.Info("Request successful")
return
}
}
log.Errorf("Failed to find peer in active leases")
log.Error("Failed to find peer in active leases")
http.Error(w, "No matching peer found", http.StatusNotFound)
}
// listPeers returns a list of all active peers.
func (s *Server) listPeers(w http.ResponseWriter, r *http.Request) {
log := s.log.With(zap.String("peer", r.RemoteAddr))
log.Infof("Serving GET request for /peers")
log := s.log.With(slog.String("peer", r.RemoteAddr))
log.Info("Serving GET request for /peers")
peers, err := s.listAll()
if err != nil {
log.With(zap.Error(err)).Errorf("Failed to list peer metadata")
log.With(slog.Any("error", err)).Error("Failed to list peer metadata")
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
@ -111,38 +111,38 @@ func (s *Server) listPeers(w http.ResponseWriter, r *http.Request) {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
log.Infof("Request successful")
log.Info("Request successful")
}
// initSecretHash returns the hash of the init secret.
func (s *Server) initSecretHash(w http.ResponseWriter, r *http.Request) {
log := s.log.With(zap.String("initSecretHash", r.RemoteAddr))
log := s.log.With(slog.String("initSecretHash", r.RemoteAddr))
if r.Method != http.MethodGet {
log.With(zap.String("method", r.Method)).Errorf("Invalid method for /initSecretHash")
log.With(slog.String("method", r.Method)).Error("Invalid method for /initSecretHash")
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
return
}
log.Infof("Serving GET request for /initsecrethash")
log.Info("Serving GET request for /initsecrethash")
w.Header().Set("Content-Type", "text/plain")
_, err := w.Write(s.initSecretHashVal)
if err != nil {
log.With(zap.Error(err)).Errorf("Failed to write init secret hash")
log.With(slog.Any("error", err)).Error("Failed to write init secret hash")
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
log.Infof("Request successful")
log.Info("Request successful")
}
// getEndpoint returns the IP address of the first control-plane instance.
// This allows us to fake a load balancer for QEMU instances.
func (s *Server) getEndpoint(w http.ResponseWriter, r *http.Request) {
log := s.log.With(zap.String("peer", r.RemoteAddr))
log.Infof("Serving GET request for /endpoint")
log := s.log.With(slog.String("peer", r.RemoteAddr))
log.Info("Serving GET request for /endpoint")
net, err := s.virt.LookupNetworkByName(s.network)
if err != nil {
log.With(zap.Error(err)).Errorf("Failed to lookup network")
log.With(slog.Any("error", err)).Error("Failed to lookup network")
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
@ -150,7 +150,7 @@ func (s *Server) getEndpoint(w http.ResponseWriter, r *http.Request) {
leases, err := net.GetDHCPLeases()
if err != nil {
log.With(zap.Error(err)).Errorf("Failed to get DHCP leases")
log.With(slog.Any("error", err)).Error("Failed to get DHCP leases")
http.Error(w, err.Error(), http.StatusInternalServerError)
}
@ -162,12 +162,12 @@ func (s *Server) getEndpoint(w http.ResponseWriter, r *http.Request) {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
log.Infof("Request successful")
log.Info("Request successful")
return
}
}
log.Errorf("Failed to find control-plane peer in active leases")
log.Error("Failed to find control-plane peer in active leases")
http.Error(w, "No matching peer found", http.StatusNotFound)
}

View file

@ -11,6 +11,7 @@ import (
"encoding/json"
"errors"
"io"
"log/slog"
"net/http"
"net/http/httptest"
"testing"
@ -67,7 +68,7 @@ func TestListAll(t *testing.T) {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
server := New(logger.NewTest(t), "test", "initSecretHash", tc.connect)
server := New(slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), "test", "initSecretHash", tc.connect)
res, err := server.listAll()
@ -138,7 +139,7 @@ func TestListSelf(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
server := New(logger.NewTest(t), "test", "initSecretHash", tc.connect)
server := New(slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), "test", "initSecretHash", tc.connect)
req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, "http://192.0.0.1/self", nil)
require.NoError(err)
@ -198,7 +199,7 @@ func TestListPeers(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
server := New(logger.NewTest(t), "test", "initSecretHash", tc.connect)
server := New(slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), "test", "initSecretHash", tc.connect)
req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, "http://192.0.0.1/peers", nil)
require.NoError(err)
@ -253,7 +254,7 @@ func TestInitSecretHash(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
server := New(logger.NewTest(t), "test", tc.wantHash, defaultConnect)
server := New(slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), "test", tc.wantHash, defaultConnect)
req, err := http.NewRequestWithContext(context.Background(), tc.method, "http://192.0.0.1/initsecrethash", nil)
require.NoError(err)

View file

@ -0,0 +1,106 @@
/*
Copyright (c) Edgeless Systems GmbH
SPDX-License-Identifier: AGPL-3.0-only
*/
package cmd
import (
"fmt"
"io"
"log/slog"
"os"
"github.com/edgelesssys/constellation/v2/internal/osimage"
"github.com/edgelesssys/constellation/v2/internal/osimage/archive"
awsupload "github.com/edgelesssys/constellation/v2/internal/osimage/aws"
"github.com/spf13/cobra"
)
// newAWSCmd returns the command that uploads an OS image to AWS.
func newAWSCmd() *cobra.Command {
cmd := &cobra.Command{
Use: "aws",
Short: "Upload OS image to AWS",
Long: "Upload OS image to AWS.",
Args: cobra.ExactArgs(0),
RunE: runAWS,
}
cmd.Flags().String("aws-region", "eu-central-1", "AWS region used during AMI creation")
cmd.Flags().String("aws-bucket", "constellation-images", "S3 bucket used during AMI creation")
return cmd
}
func runAWS(cmd *cobra.Command, _ []string) error {
workdir := os.Getenv("BUILD_WORKING_DIRECTORY")
if len(workdir) > 0 {
must(os.Chdir(workdir))
}
flags, err := parseAWSFlags(cmd)
if err != nil {
return err
}
log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel}))
log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
archiveC, archiveCClose, err := archive.New(cmd.Context(), flags.region, flags.bucket, flags.distributionID, log)
if err != nil {
return err
}
defer func() {
if err := archiveCClose(cmd.Context()); err != nil {
log.Error("closing archive client", slog.Any("error", err))
}
}()
uploadC, err := awsupload.New(flags.awsRegion, flags.awsBucket, log)
if err != nil {
return fmt.Errorf("uploading image: %w", err)
}
file, err := os.Open(flags.rawImage)
if err != nil {
return fmt.Errorf("uploading image: opening image file %w", err)
}
defer file.Close()
size, err := file.Seek(0, io.SeekEnd)
if err != nil {
return err
}
if _, err := file.Seek(0, io.SeekStart); err != nil {
return err
}
out := cmd.OutOrStdout()
if len(flags.out) > 0 {
outF, err := os.Create(flags.out)
if err != nil {
return fmt.Errorf("uploading image: opening output file %w", err)
}
defer outF.Close()
out = outF
}
uploadReq := &osimage.UploadRequest{
Provider: flags.provider,
Version: flags.version,
AttestationVariant: flags.attestationVariant,
SecureBoot: flags.secureBoot,
Size: size,
Timestamp: flags.timestamp,
Image: file,
}
if flags.secureBoot {
sbDatabase, uefiVarStore, err := loadSecureBootKeys(flags.pki)
if err != nil {
return err
}
uploadReq.SBDatabase = sbDatabase
uploadReq.UEFIVarStore = uefiVarStore
}
return uploadImage(cmd.Context(), archiveC, uploadC, uploadReq, out)
}
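
The handlers above fix the log level at construction time from the parsed flags. slog.HandlerOptions.Level accepts any slog.Leveler, so a slog.LevelVar can be plugged in instead when verbosity needs to change after the logger exists; a minimal sketch, not part of this change:

package main

import (
	"log/slog"
	"os"
)

func main() {
	// LevelVar satisfies slog.Leveler and can be adjusted at runtime; its zero value is LevelInfo.
	lvl := new(slog.LevelVar)
	log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: lvl}))

	log.Debug("not printed yet")

	lvl.Set(slog.LevelDebug) // e.g. after parsing a verbose flag
	log.Debug("now printed")
}
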

View file

@ -0,0 +1,107 @@
/*
Copyright (c) Edgeless Systems GmbH
SPDX-License-Identifier: AGPL-3.0-only
*/
package cmd
import (
"fmt"
"io"
"log/slog"
"os"
"github.com/edgelesssys/constellation/v2/internal/osimage"
"github.com/edgelesssys/constellation/v2/internal/osimage/archive"
azureupload "github.com/edgelesssys/constellation/v2/internal/osimage/azure"
"github.com/spf13/cobra"
)
// newAzureCmd returns the command that uploads an OS image to Azure.
func newAzureCmd() *cobra.Command {
cmd := &cobra.Command{
Use: "azure",
Short: "Upload OS image to Azure",
Long: "Upload OS image to Azure.",
Args: cobra.ExactArgs(0),
RunE: runAzure,
}
cmd.Flags().String("az-subscription", "0d202bbb-4fa7-4af8-8125-58c269a05435", "Azure subscription to use")
cmd.Flags().String("az-location", "northeurope", "Azure location to use")
cmd.Flags().String("az-resource-group", "constellation-images", "Azure resource group to use")
return cmd
}
func runAzure(cmd *cobra.Command, _ []string) error {
workdir := os.Getenv("BUILD_WORKING_DIRECTORY")
if len(workdir) > 0 {
must(os.Chdir(workdir))
}
flags, err := parseAzureFlags(cmd)
if err != nil {
return err
}
log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel}))
log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
archiveC, archiveCClose, err := archive.New(cmd.Context(), flags.region, flags.bucket, flags.distributionID, log)
if err != nil {
return err
}
defer func() {
if err := archiveCClose(cmd.Context()); err != nil {
log.Error("closing archive client", slog.Any("error", err))
}
}()
uploadC, err := azureupload.New(flags.azSubscription, flags.azLocation, flags.azResourceGroup, log)
if err != nil {
return fmt.Errorf("uploading image: %w", err)
}
file, err := os.Open(flags.rawImage)
if err != nil {
return fmt.Errorf("uploading image: opening image file %w", err)
}
defer file.Close()
size, err := file.Seek(0, io.SeekEnd)
if err != nil {
return err
}
if _, err := file.Seek(0, io.SeekStart); err != nil {
return err
}
out := cmd.OutOrStdout()
if len(flags.out) > 0 {
outF, err := os.Create(flags.out)
if err != nil {
return fmt.Errorf("uploading image: opening output file %w", err)
}
defer outF.Close()
out = outF
}
uploadReq := &osimage.UploadRequest{
Provider: flags.provider,
Version: flags.version,
AttestationVariant: flags.attestationVariant,
SecureBoot: flags.secureBoot,
Size: size,
Timestamp: flags.timestamp,
Image: file,
}
if flags.secureBoot {
sbDatabase, uefiVarStore, err := loadSecureBootKeys(flags.pki)
if err != nil {
return err
}
uploadReq.SBDatabase = sbDatabase
uploadReq.UEFIVarStore = uefiVarStore
}
return uploadImage(cmd.Context(), archiveC, uploadC, uploadReq, out)
}

View file

@ -8,20 +8,21 @@ package cmd
import (
"errors"
"log/slog"
"os"
"path/filepath"
"strings"
"github.com/edgelesssys/constellation/v2/internal/api/versionsapi"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
"github.com/spf13/cobra"
"go.uber.org/zap/zapcore"
)
type s3Flags struct {
region string
bucket string
distributionID string
logLevel zapcore.Level
logLevel slog.Level
}
func parseS3Flags(cmd *cobra.Command) (s3Flags, error) {
@ -41,9 +42,9 @@ func parseS3Flags(cmd *cobra.Command) (s3Flags, error) {
if err != nil {
return s3Flags{}, err
}
logLevel := zapcore.InfoLevel
logLevel := slog.LevelInfo
if verbose {
logLevel = zapcore.DebugLevel
logLevel = slog.LevelDebug
}
return s3Flags{
@ -84,7 +85,7 @@ func parseUploadMeasurementsFlags(cmd *cobra.Command) (measurementsFlags, error)
type mergeMeasurementsFlags struct {
out string
logLevel zapcore.Level
logLevel slog.Level
}
func parseMergeMeasurementsFlags(cmd *cobra.Command) (mergeMeasurementsFlags, error) {
@ -96,9 +97,9 @@ func parseMergeMeasurementsFlags(cmd *cobra.Command) (mergeMeasurementsFlags, er
if err != nil {
return mergeMeasurementsFlags{}, err
}
logLevel := zapcore.InfoLevel
logLevel := slog.LevelInfo
if verbose {
logLevel = zapcore.DebugLevel
logLevel = slog.LevelDebug
}
return mergeMeasurementsFlags{
@ -112,7 +113,7 @@ type envelopeMeasurementsFlags struct {
csp cloudprovider.Provider
attestationVariant string
in, out string
logLevel zapcore.Level
logLevel slog.Level
}
func parseEnvelopeMeasurementsFlags(cmd *cobra.Command) (envelopeMeasurementsFlags, error) {
@ -148,9 +149,9 @@ func parseEnvelopeMeasurementsFlags(cmd *cobra.Command) (envelopeMeasurementsFla
if err != nil {
return envelopeMeasurementsFlags{}, err
}
logLevel := zapcore.InfoLevel
logLevel := slog.LevelInfo
if verbose {
logLevel = zapcore.DebugLevel
logLevel = slog.LevelDebug
}
return envelopeMeasurementsFlags{

View file

@ -0,0 +1,107 @@
/*
Copyright (c) Edgeless Systems GmbH
SPDX-License-Identifier: AGPL-3.0-only
*/
package cmd
import (
"fmt"
"io"
"log/slog"
"os"
"github.com/edgelesssys/constellation/v2/internal/osimage"
"github.com/edgelesssys/constellation/v2/internal/osimage/archive"
gcpupload "github.com/edgelesssys/constellation/v2/internal/osimage/gcp"
"github.com/spf13/cobra"
)
// newGCPCommand returns the command that uploads an OS image to GCP.
func newGCPCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "gcp",
Short: "Upload OS image to GCP",
Long: "Upload OS image to GCP.",
Args: cobra.ExactArgs(0),
RunE: runGCP,
}
cmd.Flags().String("gcp-project", "constellation-images", "GCP project to use")
cmd.Flags().String("gcp-location", "europe-west3", "GCP location to use")
cmd.Flags().String("gcp-bucket", "constellation-os-images", "GCP bucket to use")
return cmd
}
func runGCP(cmd *cobra.Command, _ []string) error {
workdir := os.Getenv("BUILD_WORKING_DIRECTORY")
if len(workdir) > 0 {
must(os.Chdir(workdir))
}
flags, err := parseGCPFlags(cmd)
if err != nil {
return err
}
log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel}))
log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
archiveC, archiveCClose, err := archive.New(cmd.Context(), flags.region, flags.bucket, flags.distributionID, log)
if err != nil {
return err
}
defer func() {
if err := archiveCClose(cmd.Context()); err != nil {
log.Error("closing archive client", slog.Any("error", err))
}
}()
uploadC, err := gcpupload.New(cmd.Context(), flags.gcpProject, flags.gcpLocation, flags.gcpBucket, log)
if err != nil {
return fmt.Errorf("uploading image: %w", err)
}
file, err := os.Open(flags.rawImage)
if err != nil {
return fmt.Errorf("uploading image: opening image file %w", err)
}
defer file.Close()
size, err := file.Seek(0, io.SeekEnd)
if err != nil {
return err
}
if _, err := file.Seek(0, io.SeekStart); err != nil {
return err
}
out := cmd.OutOrStdout()
if len(flags.out) > 0 {
outF, err := os.Create(flags.out)
if err != nil {
return fmt.Errorf("uploading image: opening output file %w", err)
}
defer outF.Close()
out = outF
}
uploadReq := &osimage.UploadRequest{
Provider: flags.provider,
Version: flags.version,
AttestationVariant: flags.attestationVariant,
SecureBoot: flags.secureBoot,
Size: size,
Timestamp: flags.timestamp,
Image: file,
}
if flags.secureBoot {
sbDatabase, uefiVarStore, err := loadSecureBootKeys(flags.pki)
if err != nil {
return err
}
uploadReq.SBDatabase = sbDatabase
uploadReq.UEFIVarStore = uefiVarStore
}
return uploadImage(cmd.Context(), archiveC, uploadC, uploadReq, out)
}

View file

@ -9,6 +9,7 @@ package cmd
import (
"encoding/json"
"fmt"
"log/slog"
"os"
"github.com/edgelesssys/constellation/v2/internal/api/versionsapi"
@ -49,8 +50,8 @@ func runInfo(cmd *cobra.Command, args []string) error {
return err
}
log := logger.New(logger.PlainLog, flags.logLevel)
log.Debugf("Parsed flags: %+v", flags)
log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel}))
log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
info, err := readInfoArgs(args)
if err != nil {
return err
@ -62,7 +63,7 @@ func runInfo(cmd *cobra.Command, args []string) error {
}
defer func() {
if err := uploadCClose(cmd.Context()); err != nil {
log.Errorf("closing upload client: %v", err)
log.Error("closing upload client", slog.Any("error", err))
}
}()
@ -70,7 +71,7 @@ func runInfo(cmd *cobra.Command, args []string) error {
if err != nil {
return fmt.Errorf("uploading image info: %w", err)
}
log.Infof("Uploaded image info to %s", url)
log.Info(fmt.Sprintf("Uploaded image info to %s", url))
return nil
}

View file

@ -9,10 +9,10 @@ package cmd
import (
"encoding/json"
"fmt"
"log/slog"
"os"
"github.com/edgelesssys/constellation/v2/internal/attestation/measurements"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/spf13/cobra"
)
@ -53,8 +53,8 @@ func runEnvelopeMeasurements(cmd *cobra.Command, _ []string) error {
return err
}
log := logger.New(logger.PlainLog, flags.logLevel)
log.Debugf("Parsed flags: %+v", flags)
log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel}))
log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
f, err := os.Open(flags.in)
if err != nil {
@ -97,7 +97,7 @@ func runEnvelopeMeasurements(cmd *cobra.Command, _ []string) error {
if err := json.NewEncoder(out).Encode(enveloped); err != nil {
return fmt.Errorf("enveloping measurements: writing output file: %w", err)
}
log.Infof("Enveloped image measurements")
log.Info("Enveloped image measurements")
return nil
}

View file

@ -9,10 +9,10 @@ package cmd
import (
"encoding/json"
"fmt"
"log/slog"
"os"
"github.com/edgelesssys/constellation/v2/internal/attestation/measurements"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/spf13/cobra"
)
@ -44,8 +44,8 @@ func runMergeMeasurements(cmd *cobra.Command, args []string) error {
return err
}
log := logger.New(logger.PlainLog, flags.logLevel)
log.Debugf("Parsed flags: %+v", flags)
log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel}))
log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
mergedMeasurements, err := readMeasurementsArgs(args)
if err != nil {
@ -65,7 +65,7 @@ func runMergeMeasurements(cmd *cobra.Command, args []string) error {
if err := json.NewEncoder(out).Encode(mergedMeasurements); err != nil {
return fmt.Errorf("merging measurements: writing output file: %w", err)
}
log.Infof("Merged image measurements")
log.Info("Merged image measurements")
return nil
}

View file

@ -8,6 +8,7 @@ package cmd
import (
"fmt"
"log/slog"
"os"
"github.com/edgelesssys/constellation/v2/internal/constants"
@ -52,8 +53,8 @@ func runMeasurementsUpload(cmd *cobra.Command, _ []string) error {
return err
}
log := logger.New(logger.PlainLog, flags.logLevel)
log.Debugf("Parsed flags: %+v", flags)
log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel}))
log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
uploadC, uploadCClose, err := measurementsuploader.New(cmd.Context(), flags.region, flags.bucket, flags.distributionID, log)
if err != nil {
@ -61,7 +62,7 @@ func runMeasurementsUpload(cmd *cobra.Command, _ []string) error {
}
defer func() {
if err := uploadCClose(cmd.Context()); err != nil {
log.Errorf("closing upload client: %v", err)
log.Error("closing upload client", slog.Any("error", err))
}
}()
@ -80,6 +81,6 @@ func runMeasurementsUpload(cmd *cobra.Command, _ []string) error {
if err != nil {
return fmt.Errorf("uploading image info: %w", err)
}
log.Infof("Uploaded image measurements to %s (and signature to %s)", measurementsURL, signatureURL)
log.Info(fmt.Sprintf("Uploaded image measurements to %s (and signature to %s)", measurementsURL, signatureURL))
return nil
}
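
Where a message carries several values, such as the measurements and signature URLs above, the alternative to pre-rendering with fmt.Sprintf is to attach them as attributes so downstream log processing can filter on them. A sketch with illustrative key names and placeholder values:

package main

import (
	"log/slog"
	"os"
)

func main() {
	log := slog.New(slog.NewTextHandler(os.Stdout, nil))

	measurementsURL := "https://cdn.example.com/measurements.json" // placeholder
	signatureURL := "https://cdn.example.com/measurements.json.sig" // placeholder

	// Each value becomes its own field instead of being baked into the message string.
	log.Info("Uploaded image measurements",
		slog.String("measurements", measurementsURL),
		slog.String("signature", signatureURL),
	)
}
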

View file

@ -0,0 +1,90 @@
/*
Copyright (c) Edgeless Systems GmbH
SPDX-License-Identifier: AGPL-3.0-only
*/
package cmd
import (
"fmt"
"io"
"log/slog"
"os"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
"github.com/edgelesssys/constellation/v2/internal/osimage"
"github.com/edgelesssys/constellation/v2/internal/osimage/archive"
nopupload "github.com/edgelesssys/constellation/v2/internal/osimage/nop"
"github.com/spf13/cobra"
)
func runNOP(cmd *cobra.Command, provider cloudprovider.Provider, _ []string) error {
workdir := os.Getenv("BUILD_WORKING_DIRECTORY")
if len(workdir) > 0 {
must(os.Chdir(workdir))
}
flags, err := parseCommonFlags(cmd)
if err != nil {
return err
}
flags.provider = provider
log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel}))
log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
archiveC, archiveCClose, err := archive.New(cmd.Context(), flags.region, flags.bucket, flags.distributionID, log)
if err != nil {
return err
}
defer func() {
if err := archiveCClose(cmd.Context()); err != nil {
log.Error("closing archive client", slog.Any("error", err))
}
}()
uploadC := nopupload.New(log)
file, err := os.Open(flags.rawImage)
if err != nil {
return fmt.Errorf("uploading image: opening image file %w", err)
}
defer file.Close()
size, err := file.Seek(0, io.SeekEnd)
if err != nil {
return err
}
if _, err := file.Seek(0, io.SeekStart); err != nil {
return err
}
out := cmd.OutOrStdout()
if len(flags.out) > 0 {
outF, err := os.Create(flags.out)
if err != nil {
return fmt.Errorf("uploading image: opening output file %w", err)
}
defer outF.Close()
out = outF
}
uploadReq := &osimage.UploadRequest{
Provider: flags.provider,
Version: flags.version,
AttestationVariant: flags.attestationVariant,
SecureBoot: flags.secureBoot,
Size: size,
Timestamp: flags.timestamp,
Image: file,
}
if flags.secureBoot {
sbDatabase, uefiVarStore, err := loadSecureBootKeys(flags.pki)
if err != nil {
return err
}
uploadReq.SBDatabase = sbDatabase
uploadReq.UEFIVarStore = uefiVarStore
}
return uploadImage(cmd.Context(), archiveC, uploadC, uploadReq, out)
}

View file

@ -8,15 +8,15 @@ package main
import (
"errors"
"fmt"
"log/slog"
"os"
"path"
"github.com/edgelesssys/constellation/v2/internal/api/attestationconfigapi"
"github.com/edgelesssys/constellation/v2/internal/attestation/variant"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/edgelesssys/constellation/v2/internal/staticupload"
"github.com/spf13/cobra"
"go.uber.org/zap"
)
// newDeleteCmd creates the delete command.
@ -46,7 +46,7 @@ func newDeleteCmd() *cobra.Command {
}
func runDelete(cmd *cobra.Command, args []string) (retErr error) {
log := logger.New(logger.PlainLog, zap.DebugLevel).Named("attestationconfigapi")
log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug})).WithGroup("attestationconfigapi")
deleteCfg, err := newDeleteConfig(cmd, ([3]string)(args[:3]))
if err != nil {
@ -89,7 +89,7 @@ func runRecursiveDelete(cmd *cobra.Command, args []string) (retErr error) {
return fmt.Errorf("creating delete config: %w", err)
}
log := logger.New(logger.PlainLog, zap.DebugLevel).Named("attestationconfigapi")
log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug})).WithGroup("attestationconfigapi")
client, closeFn, err := staticupload.New(cmd.Context(), staticupload.Config{
Bucket: deleteCfg.bucket,
Region: deleteCfg.region,

View file

@ -9,6 +9,7 @@ import (
"context"
"errors"
"fmt"
"log/slog"
"os"
"time"
@ -16,12 +17,10 @@ import (
"github.com/edgelesssys/constellation/v2/internal/attestation/variant"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
"github.com/edgelesssys/constellation/v2/internal/file"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/edgelesssys/constellation/v2/internal/staticupload"
"github.com/edgelesssys/constellation/v2/internal/verify"
"github.com/spf13/afero"
"github.com/spf13/cobra"
"go.uber.org/zap"
)
func newUploadCmd() *cobra.Command {
@ -61,7 +60,7 @@ func envCheck(_ *cobra.Command, _ []string) error {
func runUpload(cmd *cobra.Command, args []string) (retErr error) {
ctx := cmd.Context()
log := logger.New(logger.PlainLog, zap.DebugLevel).Named("attestationconfigapi")
log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug})).WithGroup("attestationconfigapi")
uploadCfg, err := newConfig(cmd, ([3]string)(args[:3]))
if err != nil {
@ -110,25 +109,25 @@ func uploadReport(ctx context.Context,
client *attestationconfigapi.Client,
cfg uploadConfig,
fs file.Handler,
log *logger.Logger,
log *slog.Logger,
) error {
if cfg.kind != snpReport {
return fmt.Errorf("kind %s not supported", cfg.kind)
}
log.Infof("Reading SNP report from file: %s", cfg.path)
log.Info(fmt.Sprintf("Reading SNP report from file: %s", cfg.path))
var report verify.Report
if err := fs.ReadJSON(cfg.path, &report); err != nil {
return fmt.Errorf("reading snp report: %w", err)
}
inputVersion := convertTCBVersionToSNPVersion(report.SNPReport.LaunchTCB)
log.Infof("Input report: %+v", inputVersion)
log.Info(fmt.Sprintf("Input report: %+v", inputVersion))
latestAPIVersionAPI, err := attestationconfigapi.NewFetcherWithCustomCDNAndCosignKey(cfg.url, cfg.cosignPublicKey).FetchSEVSNPVersionLatest(ctx, attestation)
if err != nil {
if errors.Is(err, attestationconfigapi.ErrNoVersionsFound) {
log.Infof("No versions found in API, but assuming that we are uploading the first version.")
log.Info("No versions found in API, but assuming that we are uploading the first version.")
} else {
return fmt.Errorf("fetching latest version: %w", err)
}
@ -137,7 +136,7 @@ func uploadReport(ctx context.Context,
latestAPIVersion := latestAPIVersionAPI.SEVSNPVersion
if err := client.UploadSEVSNPVersionLatest(ctx, attestation, inputVersion, latestAPIVersion, cfg.uploadDate, cfg.force); err != nil {
if errors.Is(err, attestationconfigapi.ErrNoNewerVersion) {
log.Infof("Input version: %+v is not newer than latest API version: %+v", inputVersion, latestAPIVersion)
log.Info(fmt.Sprintf("Input version: %+v is not newer than latest API version: %+v", inputVersion, latestAPIVersion))
return nil
}
return fmt.Errorf("updating latest version: %w", err)

View file

@ -9,11 +9,11 @@ import (
"context"
"errors"
"fmt"
"log/slog"
"time"
apiclient "github.com/edgelesssys/constellation/v2/internal/api/client"
"github.com/edgelesssys/constellation/v2/internal/attestation/variant"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/edgelesssys/constellation/v2/internal/sigstore"
"github.com/edgelesssys/constellation/v2/internal/staticupload"
@ -32,7 +32,7 @@ type Client struct {
}
// NewClient returns a new Client.
func NewClient(ctx context.Context, cfg staticupload.Config, cosignPwd, privateKey []byte, dryRun bool, versionWindowSize int, log *logger.Logger) (*Client, apiclient.CloseFunc, error) {
func NewClient(ctx context.Context, cfg staticupload.Config, cosignPwd, privateKey []byte, dryRun bool, versionWindowSize int, log *slog.Logger) (*Client, apiclient.CloseFunc, error) {
s3Client, clientClose, err := apiclient.NewClient(ctx, cfg.Region, cfg.Bucket, cfg.DistributionID, dryRun, log)
if err != nil {
return nil, nil, fmt.Errorf("failed to create s3 storage: %w", err)

View file

@ -55,23 +55,23 @@ func (c Client) UploadSEVSNPVersionLatest(ctx context.Context, attestation varia
return fmt.Errorf("list reported versions: %w", err)
}
if len(versionDates) < c.cacheWindowSize {
c.s3Client.Logger.Warnf("Skipping version update, found %d, expected %d reported versions.", len(versionDates), c.cacheWindowSize)
c.s3Client.Logger.Warn(fmt.Sprintf("Skipping version update, found %d, expected %d reported versions.", len(versionDates), c.cacheWindowSize))
return nil
}
minVersion, minDate, err := c.findMinVersion(ctx, attestation, versionDates)
if err != nil {
return fmt.Errorf("get minimal version: %w", err)
}
c.s3Client.Logger.Infof("Found minimal version: %+v with date: %s", minVersion, minDate)
c.s3Client.Logger.Info(fmt.Sprintf("Found minimal version: %+v with date: %s", minVersion, minDate))
shouldUpdateAPI, err := isInputNewerThanOtherVersion(minVersion, latestAPIVersion)
if err != nil {
return ErrNoNewerVersion
}
if !shouldUpdateAPI {
c.s3Client.Logger.Infof("Input version: %+v is not newer than latest API version: %+v", minVersion, latestAPIVersion)
c.s3Client.Logger.Info(fmt.Sprintf("Input version: %+v is not newer than latest API version: %+v", minVersion, latestAPIVersion))
return nil
}
c.s3Client.Logger.Infof("Input version: %+v is newer than latest API version: %+v", minVersion, latestAPIVersion)
c.s3Client.Logger.Info(fmt.Sprintf("Input version: %+v is newer than latest API version: %+v", minVersion, latestAPIVersion))
t, err := time.Parse(VersionFormat, minDate)
if err != nil {
return fmt.Errorf("parsing date: %w", err)
@ -79,7 +79,7 @@ func (c Client) UploadSEVSNPVersionLatest(ctx context.Context, attestation varia
if err := c.uploadSEVSNPVersion(ctx, attestation, minVersion, t); err != nil {
return fmt.Errorf("uploading version: %w", err)
}
c.s3Client.Logger.Infof("Successfully uploaded new Azure SEV-SNP version: %+v", minVersion)
c.s3Client.Logger.Info(fmt.Sprintf("Successfully uploaded new Azure SEV-SNP version: %+v", minVersion))
return nil
}

View file

@ -33,16 +33,15 @@ import (
"encoding/json"
"errors"
"fmt"
"log/slog"
"strings"
"time"
s3manager "github.com/aws/aws-sdk-go-v2/feature/s3/manager"
"github.com/aws/aws-sdk-go-v2/service/s3"
s3types "github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/edgelesssys/constellation/v2/internal/sigstore"
"github.com/edgelesssys/constellation/v2/internal/staticupload"
"go.uber.org/zap"
)
// Client is the a general client for all APIs.
@ -54,13 +53,13 @@ type Client struct {
dirtyPaths []string // written paths to be invalidated
DryRun bool // no write operations are performed
Logger *logger.Logger
Logger *slog.Logger
}
// NewReadOnlyClient creates a new read-only client.
// This client can be used to fetch objects but cannot write updates.
func NewReadOnlyClient(ctx context.Context, region, bucket, distributionID string,
log *logger.Logger,
log *slog.Logger,
) (*Client, CloseFunc, error) {
staticUploadClient, staticUploadClientClose, err := staticupload.New(ctx, staticupload.Config{
Region: region,
@ -89,7 +88,7 @@ func NewReadOnlyClient(ctx context.Context, region, bucket, distributionID strin
// NewClient creates a new client for the versions API.
func NewClient(ctx context.Context, region, bucket, distributionID string, dryRun bool,
log *logger.Logger,
log *slog.Logger,
) (*Client, CloseFunc, error) {
staticUploadClient, staticUploadClientClose, err := staticupload.New(ctx, staticupload.Config{
Region: region,
@ -120,7 +119,7 @@ func NewClient(ctx context.Context, region, bucket, distributionID string, dryRu
// It invalidates the CDN cache for all uploaded files.
func (c *Client) Close(ctx context.Context) error {
if c.s3ClientClose == nil {
c.Logger.Debugf("Client has no s3ClientClose")
c.Logger.Debug("Client has no s3ClientClose")
return nil
}
return c.s3ClientClose(ctx)
@ -132,7 +131,7 @@ func (c *Client) DeletePath(ctx context.Context, path string) error {
Bucket: &c.bucket,
Prefix: &path,
}
c.Logger.Debugf("Listing objects in %s", path)
c.Logger.Debug(fmt.Sprintf("Listing objects in %s", path))
objs := []s3types.Object{}
out := &s3.ListObjectsV2Output{IsTruncated: ptr(true)}
for out.IsTruncated != nil && *out.IsTruncated {
@ -143,10 +142,10 @@ func (c *Client) DeletePath(ctx context.Context, path string) error {
}
objs = append(objs, out.Contents...)
}
c.Logger.Debugf("Found %d objects in %s", len(objs), path)
c.Logger.Debug(fmt.Sprintf("Found %d objects in %s", len(objs), path))
if len(objs) == 0 {
c.Logger.Warnf("Path %s is already empty", path)
c.Logger.Warn(fmt.Sprintf("Path %s is already empty", path))
return nil
}
@ -156,7 +155,7 @@ func (c *Client) DeletePath(ctx context.Context, path string) error {
}
if c.DryRun {
c.Logger.Debugf("DryRun: Deleting %d objects with IDs %v", len(objs), objIDs)
c.Logger.Debug(fmt.Sprintf("DryRun: Deleting %d objects with IDs %v", len(objs), objIDs))
return nil
}
@ -168,7 +167,7 @@ func (c *Client) DeletePath(ctx context.Context, path string) error {
Objects: objIDs,
},
}
c.Logger.Debugf("Deleting %d objects in %s", len(objs), path)
c.Logger.Debug(fmt.Sprintf("Deleting %d objects in %s", len(objs), path))
if _, err := c.s3Client.DeleteObjects(ctx, deleteIn); err != nil {
return fmt.Errorf("deleting objects in %s: %w", path, err)
}
@ -198,7 +197,7 @@ func Fetch[T APIObject](ctx context.Context, c *Client, obj T) (T, error) {
Key: ptr(obj.JSONPath()),
}
c.Logger.Debugf("Fetching %T from s3: %s", obj, obj.JSONPath())
c.Logger.Debug(fmt.Sprintf("Fetching %T from s3: %s", obj, obj.JSONPath()))
out, err := c.s3Client.GetObject(ctx, in)
var noSuchkey *s3types.NoSuchKey
if errors.As(err, &noSuchkey) {
@ -232,7 +231,7 @@ func Update(ctx context.Context, c *Client, obj APIObject) error {
}
if c.DryRun {
c.Logger.With(zap.String("bucket", c.bucket), zap.String("key", obj.JSONPath()), zap.String("body", string(rawJSON))).Debugf("DryRun: s3 put object")
c.Logger.With(slog.String("bucket", c.bucket), slog.String("key", obj.JSONPath()), slog.String("body", string(rawJSON))).Debug("DryRun: s3 put object")
return nil
}
@ -244,7 +243,7 @@ func Update(ctx context.Context, c *Client, obj APIObject) error {
c.dirtyPaths = append(c.dirtyPaths, "/"+obj.JSONPath())
c.Logger.Debugf("Uploading %T to s3: %v", obj, obj.JSONPath())
c.Logger.Debug(fmt.Sprintf("Uploading %T to s3: %v", obj, obj.JSONPath()))
if _, err := c.Upload(ctx, in); err != nil {
return fmt.Errorf("uploading %T: %w", obj, err)
}
@ -307,7 +306,7 @@ func Delete(ctx context.Context, c *Client, obj APIObject) error {
Key: ptr(obj.JSONPath()),
}
c.Logger.Debugf("Deleting %T from s3: %s", obj, obj.JSONPath())
c.Logger.Debug(fmt.Sprintf("Deleting %T from s3: %s", obj, obj.JSONPath()))
if _, err := c.DeleteObject(ctx, in); err != nil {
return fmt.Errorf("deleting s3 object at %s: %w", obj.JSONPath(), err)
}
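
Since the Client's Logger field is now a *slog.Logger, every caller has to supply one. For tests or callers that want no output, a logger backed by io.Discard works; a sketch, not part of this change:

package main

import (
	"io"
	"log/slog"
)

// discardLogger returns a *slog.Logger that drops all records.
func discardLogger() *slog.Logger {
	return slog.New(slog.NewTextHandler(io.Discard, nil))
}

func main() {
	log := discardLogger()
	log.Info("this goes nowhere")
}
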

View file

@ -10,12 +10,12 @@ import (
"context"
"errors"
"fmt"
"log/slog"
"os"
apiclient "github.com/edgelesssys/constellation/v2/internal/api/client"
"github.com/edgelesssys/constellation/v2/internal/api/versionsapi"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/spf13/cobra"
"go.uber.org/zap/zapcore"
"golang.org/x/mod/semver"
)
@ -52,21 +52,21 @@ func runAdd(cmd *cobra.Command, _ []string) (retErr error) {
if err != nil {
return err
}
log := logger.New(logger.PlainLog, flags.logLevel)
log.Debugf("Parsed flags: %+v", flags)
log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel}))
log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
log.Debugf("Validating flags")
log.Debug("Validating flags")
if err := flags.validate(log); err != nil {
return err
}
log.Debugf("Creating version struct")
log.Debug("Creating version struct")
ver, err := versionsapi.NewVersion(flags.ref, flags.stream, flags.version, flags.kind)
if err != nil {
return fmt.Errorf("creating version: %w", err)
}
log.Debugf("Creating versions API client")
log.Debug("Creating versions API client")
client, clientClose, err := versionsapi.NewClient(cmd.Context(), flags.region, flags.bucket, flags.distributionID, flags.dryRun, log)
if err != nil {
return fmt.Errorf("creating client: %w", err)
@ -78,7 +78,7 @@ func runAdd(cmd *cobra.Command, _ []string) (retErr error) {
}
}()
log.Infof("Adding version")
log.Info("Adding version")
if err := ensureVersion(cmd.Context(), client, flags.kind, ver, versionsapi.GranularityMajor, log); err != nil {
return err
}
@ -93,14 +93,14 @@ func runAdd(cmd *cobra.Command, _ []string) (retErr error) {
}
}
log.Infof("List major->minor URL: %s", ver.ListURL(versionsapi.GranularityMajor))
log.Infof("List minor->patch URL: %s", ver.ListURL(versionsapi.GranularityMinor))
log.Info(fmt.Sprintf("List major->minor URL: %s", ver.ListURL(versionsapi.GranularityMajor)))
log.Info(fmt.Sprintf("List minor->patch URL: %s", ver.ListURL(versionsapi.GranularityMinor)))
return nil
}
func ensureVersion(ctx context.Context, client *versionsapi.Client, kind versionsapi.VersionKind, ver versionsapi.Version, gran versionsapi.Granularity,
log *logger.Logger,
log *slog.Logger,
) error {
verListReq := versionsapi.List{
Ref: ver.Ref(),
@ -112,34 +112,34 @@ func ensureVersion(ctx context.Context, client *versionsapi.Client, kind version
verList, err := client.FetchVersionList(ctx, verListReq)
var notFoundErr *apiclient.NotFoundError
if errors.As(err, &notFoundErr) {
log.Infof("Version list for %s versions under %q does not exist. Creating new list", gran.String(), ver.Major())
log.Info(fmt.Sprintf("Version list for %s versions under %q does not exist. Creating new list", gran.String(), ver.Major()))
verList = verListReq
} else if err != nil {
return fmt.Errorf("failed to list minor versions: %w", err)
}
log.Debugf("%s version list: %v", gran.String(), verList)
log.Debug(fmt.Sprintf("%s version list: %v", gran.String(), verList))
insertGran := gran + 1
insertVersion := ver.WithGranularity(insertGran)
if verList.Contains(insertVersion) {
log.Infof("Version %q already exists in list %v", insertVersion, verList.Versions)
log.Info(fmt.Sprintf("Version %q already exists in list %v", insertVersion, verList.Versions))
return nil
}
log.Infof("Inserting %s version %q into list", insertGran.String(), insertVersion)
log.Info(fmt.Sprintf("Inserting %s version %q into list", insertGran.String(), insertVersion))
verList.Versions = append(verList.Versions, insertVersion)
log.Debugf("New %s version list: %v", gran.String(), verList)
log.Debug(fmt.Sprintf("New %s version list: %v", gran.String(), verList))
if err := client.UpdateVersionList(ctx, verList); err != nil {
return fmt.Errorf("failed to add %s version: %w", gran.String(), err)
}
log.Infof("Added %q to list", insertVersion)
log.Info(fmt.Sprintf("Added %q to list", insertVersion))
return nil
}
func updateLatest(ctx context.Context, client *versionsapi.Client, kind versionsapi.VersionKind, ver versionsapi.Version, log *logger.Logger) error {
func updateLatest(ctx context.Context, client *versionsapi.Client, kind versionsapi.VersionKind, ver versionsapi.Version, log *slog.Logger) error {
latest := versionsapi.Latest{
Ref: ver.Ref(),
Stream: ver.Stream(),
@ -148,17 +148,17 @@ func updateLatest(ctx context.Context, client *versionsapi.Client, kind versions
latest, err := client.FetchVersionLatest(ctx, latest)
var notFoundErr *apiclient.NotFoundError
if errors.As(err, &notFoundErr) {
log.Debugf("Latest version for ref %q and stream %q not found", ver.Ref(), ver.Stream())
log.Debug("Latest version for ref %q and stream %q not found", ver.Ref(), ver.Stream())
} else if err != nil {
return fmt.Errorf("fetching latest version: %w", err)
}
if latest.Version == ver.Version() {
log.Infof("Version %q is already latest version", ver)
log.Info(fmt.Sprintf("Version %q is already latest version", ver))
return nil
}
log.Infof("Setting %q as latest version", ver)
log.Info(fmt.Sprintf("Setting %q as latest version", ver))
latest = versionsapi.Latest{
Ref: ver.Ref(),
Stream: ver.Stream(),
@ -183,10 +183,10 @@ type addFlags struct {
bucket string
distributionID string
kind versionsapi.VersionKind
logLevel zapcore.Level
logLevel slog.Level
}
func (f *addFlags) validate(log *logger.Logger) error {
func (f *addFlags) validate(log *slog.Logger) error {
if !semver.IsValid(f.version) {
return fmt.Errorf("version %q is not a valid semantic version", f.version)
}
@ -203,10 +203,10 @@ func (f *addFlags) validate(log *logger.Logger) error {
}
if f.release {
log.Debugf("Setting ref to %q, as release flag is set", versionsapi.ReleaseRef)
log.Debug(fmt.Sprintf("Setting ref to %q, as release flag is set", versionsapi.ReleaseRef))
f.ref = versionsapi.ReleaseRef
} else {
log.Debugf("Setting latest to true, as release flag is not set")
log.Debug("Setting latest to true, as release flag is not set")
f.latest = true // always set latest for non-release versions
}
@ -256,9 +256,9 @@ func parseAddFlags(cmd *cobra.Command) (addFlags, error) {
if err != nil {
return addFlags{}, err
}
logLevel := zapcore.InfoLevel
logLevel := slog.LevelInfo
if verbose {
logLevel = zapcore.DebugLevel
logLevel = slog.LevelDebug
}
region, err := cmd.Flags().GetString("region")
if err != nil {

View file

@ -10,11 +10,11 @@ import (
"encoding/json"
"errors"
"fmt"
"log/slog"
"os"
"github.com/edgelesssys/constellation/v2/internal/api/versionsapi"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/spf13/cobra"
"go.uber.org/zap/zapcore"
)
func newLatestCmd() *cobra.Command {
@ -38,15 +38,15 @@ func runLatest(cmd *cobra.Command, _ []string) (retErr error) {
if err != nil {
return err
}
log := logger.New(logger.PlainLog, flags.logLevel)
log.Debugf("Parsed flags: %+v", flags)
log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel}))
log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
log.Debugf("Validating flags")
log.Debug("Validating flags")
if err := flags.validate(); err != nil {
return err
}
log.Debugf("Creating versions API client")
log.Debug("Creating versions API client")
client, clientClose, err := versionsapi.NewReadOnlyClient(cmd.Context(), flags.region, flags.bucket, flags.distributionID, log)
if err != nil {
return fmt.Errorf("creating client: %w", err)
@ -58,7 +58,7 @@ func runLatest(cmd *cobra.Command, _ []string) (retErr error) {
}
}()
log.Debugf("Requesting latest version")
log.Debug("Requesting latest version")
latest := versionsapi.Latest{
Ref: flags.ref,
Stream: flags.stream,
@ -89,7 +89,7 @@ type latestFlags struct {
region string
bucket string
distributionID string
logLevel zapcore.Level
logLevel slog.Level
}
func (l *latestFlags) validate() error {
@ -133,9 +133,9 @@ func parseLatestFlags(cmd *cobra.Command) (latestFlags, error) {
if err != nil {
return latestFlags{}, err
}
logLevel := zapcore.InfoLevel
logLevel := slog.LevelInfo
if verbose {
logLevel = zapcore.DebugLevel
logLevel = slog.LevelDebug
}
return latestFlags{

View file

@ -11,14 +11,14 @@ import (
"encoding/json"
"errors"
"fmt"
"os"
"log/slog"
"github.com/spf13/cobra"
"go.uber.org/zap/zapcore"
"golang.org/x/mod/semver"
apiclient "github.com/edgelesssys/constellation/v2/internal/api/client"
"github.com/edgelesssys/constellation/v2/internal/api/versionsapi"
"github.com/edgelesssys/constellation/v2/internal/logger"
)
func newListCmd() *cobra.Command {
@ -43,15 +43,15 @@ func runList(cmd *cobra.Command, _ []string) (retErr error) {
if err != nil {
return err
}
log := logger.New(logger.PlainLog, flags.logLevel)
log.Debugf("Parsed flags: %+v", flags)
log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel}))
log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
log.Debugf("Validating flags")
log.Debug("Validating flags")
if err := flags.validate(); err != nil {
return err
}
log.Debugf("Creating versions API client")
log.Debug("Creating versions API client")
client, clientClose, err := versionsapi.NewReadOnlyClient(cmd.Context(), flags.region, flags.bucket, flags.distributionID, log)
if err != nil {
return fmt.Errorf("creating client: %w", err)
@ -67,29 +67,29 @@ func runList(cmd *cobra.Command, _ []string) (retErr error) {
if flags.minorVersion != "" {
minorVersions = []string{flags.minorVersion}
} else {
log.Debugf("Getting minor versions")
log.Debug("Getting minor versions")
minorVersions, err = listMinorVersions(cmd.Context(), client, flags.ref, flags.stream)
var errNotFound *apiclient.NotFoundError
if err != nil && errors.As(err, &errNotFound) {
log.Infof("No minor versions found for ref %q and stream %q.", flags.ref, flags.stream)
log.Info(fmt.Sprintf("No minor versions found for ref %q and stream %q.", flags.ref, flags.stream))
return nil
} else if err != nil {
return err
}
}
log.Debugf("Getting patch versions")
log.Debug("Getting patch versions")
patchVersions, err := listPatchVersions(cmd.Context(), client, flags.ref, flags.stream, minorVersions)
var errNotFound *apiclient.NotFoundError
if err != nil && errors.As(err, &errNotFound) {
log.Infof("No patch versions found for ref %q, stream %q and minor versions %v.", flags.ref, flags.stream, minorVersions)
log.Info(fmt.Sprintf("No patch versions found for ref %q, stream %q and minor versions %v.", flags.ref, flags.stream, minorVersions))
return nil
} else if err != nil {
return err
}
if flags.json {
log.Debugf("Printing versions as JSON")
log.Debug("Printing versions as JSON")
var vers []string
for _, v := range patchVersions {
vers = append(vers, v.Version())
@ -102,7 +102,7 @@ func runList(cmd *cobra.Command, _ []string) (retErr error) {
return nil
}
log.Debugf("Printing versions")
log.Debug("Printing versions")
for _, v := range patchVersions {
fmt.Println(v.ShortPath())
}
@ -158,7 +158,7 @@ type listFlags struct {
bucket string
distributionID string
json bool
logLevel zapcore.Level
logLevel slog.Level
}
func (l *listFlags) validate() error {
@ -211,9 +211,9 @@ func parseListFlags(cmd *cobra.Command) (listFlags, error) {
if err != nil {
return listFlags{}, err
}
logLevel := zapcore.InfoLevel
logLevel := slog.LevelInfo
if verbose {
logLevel = zapcore.DebugLevel
logLevel = slog.LevelDebug
}
return listFlags{

View file

@ -12,6 +12,8 @@ import (
"fmt"
"io"
"log"
"log/slog"
"os"
"regexp"
"strings"
"time"
@ -26,10 +28,8 @@ import (
"github.com/aws/smithy-go"
apiclient "github.com/edgelesssys/constellation/v2/internal/api/client"
"github.com/edgelesssys/constellation/v2/internal/api/versionsapi"
"github.com/edgelesssys/constellation/v2/internal/logger"
gaxv2 "github.com/googleapis/gax-go/v2"
"github.com/spf13/cobra"
"go.uber.org/zap/zapcore"
)
func newRemoveCmd() *cobra.Command {
@ -74,33 +74,33 @@ func runRemove(cmd *cobra.Command, _ []string) (retErr error) {
if err != nil {
return err
}
log := logger.New(logger.PlainLog, flags.logLevel)
log.Debugf("Parsed flags: %+v", flags)
log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel}))
log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
log.Debugf("Validating flags")
log.Debug("Validating flags")
if err := flags.validate(); err != nil {
return err
}
log.Debugf("Creating GCP client")
log.Debug("Creating GCP client")
gcpClient, err := newGCPClient(cmd.Context(), flags.gcpProject)
if err != nil {
return fmt.Errorf("creating GCP client: %w", err)
}
log.Debugf("Creating AWS client")
log.Debug("Creating AWS client")
awsClient, err := newAWSClient()
if err != nil {
return fmt.Errorf("creating AWS client: %w", err)
}
log.Debugf("Creating Azure client")
log.Debug("Creating Azure client")
azClient, err := newAzureClient(flags.azSubscription, flags.azLocation, flags.azResourceGroup)
if err != nil {
return fmt.Errorf("creating Azure client: %w", err)
}
log.Debugf("Creating versions API client")
log.Debug("Creating versions API client")
verclient, verclientClose, err := versionsapi.NewClient(cmd.Context(), flags.region, flags.bucket, flags.distributionID, flags.dryrun, log)
if err != nil {
return fmt.Errorf("creating client: %w", err)
@ -120,14 +120,14 @@ func runRemove(cmd *cobra.Command, _ []string) (retErr error) {
}
if flags.all {
log.Infof("Deleting ref %s", flags.ref)
log.Info("Deleting ref %s", flags.ref)
if err := deleteRef(cmd.Context(), imageClients, flags.ref, flags.dryrun, log); err != nil {
return fmt.Errorf("deleting ref: %w", err)
}
return nil
}
log.Infof("Deleting single version %s", flags.ver.ShortPath())
log.Info("Deleting single version %s", flags.ver.ShortPath())
if err := deleteSingleVersion(cmd.Context(), imageClients, flags.ver, flags.dryrun, log); err != nil {
return fmt.Errorf("deleting single version: %w", err)
}
@ -135,15 +135,15 @@ func runRemove(cmd *cobra.Command, _ []string) (retErr error) {
return nil
}
func deleteSingleVersion(ctx context.Context, clients rmImageClients, ver versionsapi.Version, dryrun bool, log *logger.Logger) error {
func deleteSingleVersion(ctx context.Context, clients rmImageClients, ver versionsapi.Version, dryrun bool, log *slog.Logger) error {
var retErr error
log.Debugf("Deleting images for %s", ver.Version)
log.Debug("Deleting images for %s", ver.Version)
if err := deleteImage(ctx, clients, ver, dryrun, log); err != nil {
retErr = errors.Join(retErr, fmt.Errorf("deleting images: %w", err))
}
log.Debugf("Deleting version %s from versions API", ver.Version)
log.Debug("Deleting version %s from versions API", ver.Version)
if err := clients.version.DeleteVersion(ctx, ver); err != nil {
retErr = errors.Join(retErr, fmt.Errorf("deleting version from versions API: %w", err))
}
@ -151,15 +151,15 @@ func deleteSingleVersion(ctx context.Context, clients rmImageClients, ver versio
return retErr
}
func deleteRef(ctx context.Context, clients rmImageClients, ref string, dryrun bool, log *logger.Logger) error {
func deleteRef(ctx context.Context, clients rmImageClients, ref string, dryrun bool, log *slog.Logger) error {
var vers []versionsapi.Version
for _, stream := range []string{"nightly", "console", "debug"} {
log.Infof("Listing versions of stream %s", stream)
log.Info("Listing versions of stream %s", stream)
minorVersions, err := listMinorVersions(ctx, clients.version, ref, stream)
var notFoundErr *apiclient.NotFoundError
if errors.As(err, &notFoundErr) {
log.Debugf("No minor versions found for stream %s", stream)
log.Debug("No minor versions found for stream %s", stream)
continue
} else if err != nil {
return fmt.Errorf("listing minor versions for stream %s: %w", stream, err)
@ -167,7 +167,7 @@ func deleteRef(ctx context.Context, clients rmImageClients, ref string, dryrun b
patchVersions, err := listPatchVersions(ctx, clients.version, ref, stream, minorVersions)
if errors.As(err, &notFoundErr) {
log.Debugf("No patch versions found for stream %s", stream)
log.Debug("No patch versions found for stream %s", stream)
continue
} else if err != nil {
return fmt.Errorf("listing patch versions for stream %s: %w", stream, err)
@ -175,7 +175,7 @@ func deleteRef(ctx context.Context, clients rmImageClients, ref string, dryrun b
vers = append(vers, patchVersions...)
}
log.Infof("Found %d versions to delete", len(vers))
log.Info("Found %d versions to delete", len(vers))
var retErr error
@ -185,7 +185,7 @@ func deleteRef(ctx context.Context, clients rmImageClients, ref string, dryrun b
}
}
log.Infof("Deleting ref %s from versions API", ref)
log.Info("Deleting ref %s from versions API", ref)
if err := clients.version.DeleteRef(ctx, ref); err != nil {
retErr = errors.Join(retErr, fmt.Errorf("deleting ref from versions API: %w", err))
}
@ -193,7 +193,7 @@ func deleteRef(ctx context.Context, clients rmImageClients, ref string, dryrun b
return retErr
}
func deleteImage(ctx context.Context, clients rmImageClients, ver versionsapi.Version, dryrun bool, log *logger.Logger) error {
func deleteImage(ctx context.Context, clients rmImageClients, ver versionsapi.Version, dryrun bool, log *slog.Logger) error {
var retErr error
imageInfo := versionsapi.ImageInfo{
@ -204,8 +204,8 @@ func deleteImage(ctx context.Context, clients rmImageClients, ver versionsapi.Ve
imageInfo, err := clients.version.FetchImageInfo(ctx, imageInfo)
var notFound *apiclient.NotFoundError
if errors.As(err, &notFound) {
log.Warnf("Image info for %s not found", ver.Version)
log.Warnf("Skipping image deletion")
log.Warn("Image info for %s not found", ver.Version)
log.Warn("Skipping image deletion")
return nil
} else if err != nil {
return fmt.Errorf("fetching image info: %w", err)
@ -214,17 +214,17 @@ func deleteImage(ctx context.Context, clients rmImageClients, ver versionsapi.Ve
for _, entry := range imageInfo.List {
switch entry.CSP {
case "aws":
log.Infof("Deleting AWS images from %s", imageInfo.JSONPath())
log.Info("Deleting AWS images from %s", imageInfo.JSONPath())
if err := clients.aws.deleteImage(ctx, entry.Reference, entry.Region, dryrun, log); err != nil {
retErr = errors.Join(retErr, fmt.Errorf("deleting AWS image %s: %w", entry.Reference, err))
}
case "gcp":
log.Infof("Deleting GCP images from %s", imageInfo.JSONPath())
log.Info("Deleting GCP images from %s", imageInfo.JSONPath())
if err := clients.gcp.deleteImage(ctx, entry.Reference, dryrun, log); err != nil {
retErr = errors.Join(retErr, fmt.Errorf("deleting GCP image %s: %w", entry.Reference, err))
}
case "azure":
log.Infof("Deleting Azure images from %s", imageInfo.JSONPath())
log.Info("Deleting Azure images from %s", imageInfo.JSONPath())
if err := clients.az.deleteImage(ctx, entry.Reference, dryrun, log); err != nil {
retErr = errors.Join(retErr, fmt.Errorf("deleting Azure image %s: %w", entry.Reference, err))
}
@ -259,7 +259,7 @@ type rmFlags struct {
azSubscription string
azLocation string
azResourceGroup string
logLevel zapcore.Level
logLevel slog.Level
ver versionsapi.Version
}
@ -358,9 +358,9 @@ func parseRmFlags(cmd *cobra.Command) (*rmFlags, error) {
if err != nil {
return nil, err
}
logLevel := zapcore.InfoLevel
logLevel := slog.LevelInfo
if verbose {
logLevel = zapcore.DebugLevel
logLevel = slog.LevelDebug
}
return &rmFlags{
@ -400,17 +400,17 @@ type ec2API interface {
) (*ec2.DeleteSnapshotOutput, error)
}
func (a *awsClient) deleteImage(ctx context.Context, ami string, region string, dryrun bool, log *logger.Logger) error {
func (a *awsClient) deleteImage(ctx context.Context, ami string, region string, dryrun bool, log *slog.Logger) error {
cfg, err := awsconfig.LoadDefaultConfig(ctx, awsconfig.WithRegion(region))
if err != nil {
return err
}
a.ec2 = ec2.NewFromConfig(cfg)
log.Debugf("Deleting resources in AWS region %s", region)
log.Debug("Deleting resources in AWS region %s", region)
snapshotID, err := a.getSnapshotID(ctx, ami, log)
if err != nil {
log.Warnf("Failed to get AWS snapshot ID for image %s: %v", ami, err)
log.Warn("Failed to get AWS snapshot ID for image %s: %v", ami, err)
}
if err := a.deregisterImage(ctx, ami, dryrun, log); err != nil {
@ -426,8 +426,8 @@ func (a *awsClient) deleteImage(ctx context.Context, ami string, region string,
return nil
}
func (a *awsClient) deregisterImage(ctx context.Context, ami string, dryrun bool, log *logger.Logger) error {
log.Debugf("Deregistering image %s", ami)
func (a *awsClient) deregisterImage(ctx context.Context, ami string, dryrun bool, log *slog.Logger) error {
log.Debug("Deregistering image %s", ami)
deregisterReq := ec2.DeregisterImageInput{
ImageId: &ami,
@ -438,15 +438,15 @@ func (a *awsClient) deregisterImage(ctx context.Context, ami string, dryrun bool
if errors.As(err, &apiErr) &&
(apiErr.ErrorCode() == "InvalidAMIID.NotFound" ||
apiErr.ErrorCode() == "InvalidAMIID.Unavailable") {
log.Warnf("AWS image %s not found", ami)
log.Warn("AWS image %s not found", ami)
return nil
}
return err
}
func (a *awsClient) getSnapshotID(ctx context.Context, ami string, log *logger.Logger) (string, error) {
log.Debugf("Describing image %s", ami)
func (a *awsClient) getSnapshotID(ctx context.Context, ami string, log *slog.Logger) (string, error) {
log.Debug("Describing image %s", ami)
req := ec2.DescribeImagesInput{
ImageIds: []string{ami},
@ -481,8 +481,8 @@ func (a *awsClient) getSnapshotID(ctx context.Context, ami string, log *logger.L
return snapshotID, nil
}
func (a *awsClient) deleteSnapshot(ctx context.Context, snapshotID string, dryrun bool, log *logger.Logger) error {
log.Debugf("Deleting AWS snapshot %s", snapshotID)
func (a *awsClient) deleteSnapshot(ctx context.Context, snapshotID string, dryrun bool, log *slog.Logger) error {
log.Debug("Deleting AWS snapshot %s", snapshotID)
req := ec2.DeleteSnapshotInput{
SnapshotId: &snapshotID,
@ -493,7 +493,7 @@ func (a *awsClient) deleteSnapshot(ctx context.Context, snapshotID string, dryru
if errors.As(err, &apiErr) &&
(apiErr.ErrorCode() == "InvalidSnapshot.NotFound" ||
apiErr.ErrorCode() == "InvalidSnapshot.Unavailable") {
log.Warnf("AWS snapshot %s not found", snapshotID)
log.Warn("AWS snapshot %s not found", snapshotID)
return nil
}
@ -523,7 +523,7 @@ type gcpComputeAPI interface {
io.Closer
}
func (g *gcpClient) deleteImage(ctx context.Context, imageURI string, dryrun bool, log *logger.Logger) error {
func (g *gcpClient) deleteImage(ctx context.Context, imageURI string, dryrun bool, log *slog.Logger) error {
// Extract image name from image URI
// Expected input into function: "projects/constellation-images/global/images/v2-6-0-stable"
// Required for computepb.DeleteImageRequest: "v2-6-0-stable"
@ -536,20 +536,20 @@ func (g *gcpClient) deleteImage(ctx context.Context, imageURI string, dryrun boo
}
if dryrun {
log.Debugf("DryRun: delete image request: %v", req)
log.Debug("DryRun: delete image request: %v", req)
return nil
}
log.Debugf("Deleting image %s", image)
log.Debug("Deleting image %s", image)
op, err := g.compute.Delete(ctx, req)
if err != nil && strings.Contains(err.Error(), "404") {
log.Warnf("GCP image %s not found", image)
log.Warn("GCP image %s not found", image)
return nil
} else if err != nil {
return fmt.Errorf("deleting image %s: %w", image, err)
}
log.Debugf("Waiting for operation to finish")
log.Debug("Waiting for operation to finish")
if err := op.Wait(ctx); err != nil {
return fmt.Errorf("waiting for operation: %w", err)
}
@ -624,30 +624,30 @@ var (
azCommunityImageRegex = regexp.MustCompile("^/CommunityGalleries/([[:alnum:]-]+)/Images/([[:alnum:]._-]+)/Versions/([[:alnum:]._-]+)$")
)
func (a *azureClient) deleteImage(ctx context.Context, image string, dryrun bool, log *logger.Logger) error {
func (a *azureClient) deleteImage(ctx context.Context, image string, dryrun bool, log *slog.Logger) error {
azImage, err := a.parseImage(ctx, image, log)
if err != nil {
return err
}
if dryrun {
log.Debugf("DryRun: delete image %v", azImage)
log.Debug("DryRun: delete image %v", azImage)
return nil
}
log.Debugf("Deleting image %q, version %q", azImage.imageDefinition, azImage.version)
log.Debug("Deleting image %q, version %q", azImage.imageDefinition, azImage.version)
poller, err := a.imageVersions.BeginDelete(ctx, azImage.resourceGroup, azImage.gallery,
azImage.imageDefinition, azImage.version, nil)
if err != nil {
return fmt.Errorf("begin delete image version: %w", err)
}
log.Debugf("Waiting for operation to finish")
log.Debug("Waiting for operation to finish")
if _, err := poller.PollUntilDone(ctx, nil); err != nil {
return fmt.Errorf("waiting for operation: %w", err)
}
log.Debugf("Checking if image definition %q still has versions left", azImage.imageDefinition)
log.Debug("Checking if image definition %q still has versions left", azImage.imageDefinition)
pager := a.imageVersions.NewListByGalleryImagePager(azImage.resourceGroup, azImage.gallery,
azImage.imageDefinition, nil)
for pager.More() {
@ -656,20 +656,20 @@ func (a *azureClient) deleteImage(ctx context.Context, image string, dryrun bool
return fmt.Errorf("listing image versions of image definition %s: %w", azImage.imageDefinition, err)
}
if len(nextResult.Value) != 0 {
log.Debugf("Image definition %q still has versions left, won't be deleted", azImage.imageDefinition)
log.Debug("Image definition %q still has versions left, won't be deleted", azImage.imageDefinition)
return nil
}
}
time.Sleep(15 * time.Second) // Azure needs time to understand that there is no version left...
log.Debugf("Deleting image definition %s", azImage.imageDefinition)
log.Debug("Deleting image definition %s", azImage.imageDefinition)
op, err := a.image.BeginDelete(ctx, azImage.resourceGroup, azImage.gallery, azImage.imageDefinition, nil)
if err != nil {
return fmt.Errorf("deleting image definition %s: %w", azImage.imageDefinition, err)
}
log.Debugf("Waiting for operation to finish")
log.Debug("Waiting for operation to finish")
if _, err := op.PollUntilDone(ctx, nil); err != nil {
return fmt.Errorf("waiting for operation: %w", err)
}
@ -684,9 +684,9 @@ type azImage struct {
version string
}
func (a *azureClient) parseImage(ctx context.Context, image string, log *logger.Logger) (azImage, error) {
func (a *azureClient) parseImage(ctx context.Context, image string, log *slog.Logger) (azImage, error) {
if m := azImageRegex.FindStringSubmatch(image); len(m) == 5 {
log.Debugf(
log.Debug(fmt.Sprintf(
"Image matches local image format, resource group: %s, gallery: %s, image definition: %s, version: %s",
m[1], m[2], m[3], m[4],
))
@ -707,7 +707,7 @@ func (a *azureClient) parseImage(ctx context.Context, image string, log *logger.
imageDefinition := m[2]
version := m[3]
log.Debugf(
log.Debug(fmt.Sprintf(
"Image matches community image format, gallery public name: %s, image definition: %s, version: %s",
galleryPublicName, imageDefinition, version,
))
@ -721,24 +721,24 @@ func (a *azureClient) parseImage(ctx context.Context, image string, log *logger.
}
for _, v := range nextResult.Value {
if v.Name == nil {
log.Debugf("Skipping gallery with nil name")
log.Debug("Skipping gallery with nil name")
continue
}
if v.Properties.SharingProfile == nil {
log.Debugf("Skipping gallery %s with nil sharing profile", *v.Name)
log.Debug("Skipping gallery %s with nil sharing profile", *v.Name)
continue
}
if v.Properties.SharingProfile.CommunityGalleryInfo == nil {
log.Debugf("Skipping gallery %s with nil community gallery info", *v.Name)
log.Debug("Skipping gallery %s with nil community gallery info", *v.Name)
continue
}
if v.Properties.SharingProfile.CommunityGalleryInfo.PublicNames == nil {
log.Debugf("Skipping gallery %s with nil public names", *v.Name)
log.Debug("Skipping gallery %s with nil public names", *v.Name)
continue
}
for _, publicName := range v.Properties.SharingProfile.CommunityGalleryInfo.PublicNames {
if publicName == nil {
log.Debugf("Skipping nil public name")
log.Debug("Skipping nil public name")
continue
}
if *publicName == galleryPublicName {
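The pattern repeated throughout this file replaces printf-style zap calls (Debugf, Infof, Warnf) with slog's Debug, Info and Warn. slog does not interpret format verbs, so messages that carry arguments need either fmt.Sprintf or structured key-value pairs. A short sketch of both options follows; the ami value is a placeholder used only for illustration.

package main

import (
	"fmt"
	"log/slog"
	"os"
)

func main() {
	log := slog.New(slog.NewTextHandler(os.Stdout, nil))
	ami := "ami-0123456789abcdef0" // placeholder value for illustration

	// Option 1: keep the old message text by formatting it first.
	log.Debug(fmt.Sprintf("Deregistering image %s", ami))

	// Option 2: switch to slog's structured key-value pairs.
	log.Debug("Deregistering image", "ami", ami)
}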

View file

@ -10,13 +10,13 @@ import (
"context"
"errors"
"fmt"
"log/slog"
"path"
"golang.org/x/mod/semver"
apiclient "github.com/edgelesssys/constellation/v2/internal/api/client"
"github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/logger"
)
// Client is a client for the versions API.
@ -27,7 +27,7 @@ type Client struct {
// NewClient creates a new client for the versions API.
func NewClient(ctx context.Context, region, bucket, distributionID string, dryRun bool,
log *logger.Logger,
log *slog.Logger,
) (*Client, CloseFunc, error) {
genericClient, genericClientClose, err := apiclient.NewClient(ctx, region, bucket, distributionID, dryRun, log)
versionsClient := &Client{
@ -43,7 +43,7 @@ func NewClient(ctx context.Context, region, bucket, distributionID string, dryRu
// NewReadOnlyClient creates a new read-only client.
// This client can be used to fetch objects but cannot write updates.
func NewReadOnlyClient(ctx context.Context, region, bucket, distributionID string,
log *logger.Logger,
log *slog.Logger,
) (*Client, CloseFunc, error) {
genericClient, genericClientClose, err := apiclient.NewReadOnlyClient(ctx, region, bucket, distributionID, log)
if err != nil {
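NewClient and NewReadOnlyClient now accept a *slog.Logger instead of *logger.Logger. Below is a sketch of a read-only caller under that signature; the region, bucket and empty distribution ID are placeholders, and CloseFunc handling is omitted because its type is not shown in this hunk.

package example

import (
	"context"
	"log/slog"
	"os"

	"github.com/edgelesssys/constellation/v2/internal/api/versionsapi"
)

// newVersionsClient sketches a caller after the signature change; the
// region, bucket and empty distribution ID are placeholders.
func newVersionsClient(ctx context.Context) (*versionsapi.Client, error) {
	log := slog.New(slog.NewJSONHandler(os.Stderr, nil))
	client, closeFn, err := versionsapi.NewReadOnlyClient(ctx, "eu-central-1", "example-bucket", "", log)
	if err != nil {
		return nil, err
	}
	_ = closeFn // closing is left to the caller; the CloseFunc type is not part of this hunk
	return client, nil
}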

View file

@ -45,8 +45,8 @@ const (
// Logger is a logger used to print warnings and infos during attestation validation.
type Logger interface {
Infof(format string, args ...any)
Warnf(format string, args ...any)
Info(msg string, args ...any)
Warn(msg string, args ...any)
}
// NOPLogger is a no-op implementation of [Logger].
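With the methods reduced to Info and Warn taking a message plus variadic arguments, *slog.Logger satisfies this interface directly, since parameter names play no role in interface satisfaction. A compile-time check, with the interface repeated locally for the sketch:

package example

import "log/slog"

// Logger mirrors the attestation Logger interface from the hunk above.
type Logger interface {
	Info(msg string, args ...any)
	Warn(msg string, args ...any)
}

// *slog.Logger provides Info(msg string, args ...any) and Warn(msg string, args ...any),
// so it can be passed wherever this interface is expected.
var _ Logger = (*slog.Logger)(nil)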

View file

@ -17,6 +17,7 @@ import (
"encoding/pem"
"errors"
"fmt"
"log/slog"
"regexp"
"testing"
@ -161,7 +162,7 @@ func TestValidateSNPReport(t *testing.T) {
require.NoError(err)
v := awsValidator{httpsGetter: newStubHTTPSGetter(&urlResponseMatcher{}, nil), verifier: tc.verifier, validator: tc.validator}
err = v.validate(vtpm.AttestationDocument{InstanceInfo: infoMarshalled}, ask, ark, [64]byte(hash), config.DefaultForAWSSEVSNP(), logger.NewTest(t))
err = v.validate(vtpm.AttestationDocument{InstanceInfo: infoMarshalled}, ask, ark, [64]byte(hash), config.DefaultForAWSSEVSNP(), slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)))
if tc.wantErr {
assert.Error(err)
} else {
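The test files swap logger.NewTest(t) for a slog logger writing through logger.TestWriter. That writer's implementation is not part of this diff; the following sketch shows one plausible shape of a testing.T-backed writer and is an assumption, not the project's actual type.

package example

import (
	"log/slog"
	"testing"
)

// testWriter forwards handler output to the test log. This is an assumption
// about how a TestWriter-style type behaves; the real logger.TestWriter is not shown here.
type testWriter struct{ t *testing.T }

func (w testWriter) Write(p []byte) (int, error) {
	w.t.Log(string(p))
	return len(p), nil
}

func newTestLogger(t *testing.T) *slog.Logger {
	return slog.New(slog.NewTextHandler(testWriter{t: t}, nil))
}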

View file

@ -48,7 +48,7 @@ func TestNewValidator(t *testing.T) {
}{
"success": {
cfg: config.DefaultForAzureSEVSNP(),
logger: logger.NewTest(t),
logger: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)),
},
"nil logger": {
cfg: config.DefaultForAzureSEVSNP(),
@ -127,7 +127,7 @@ func TestCheckIDKeyDigest(t *testing.T) {
return report
}
newTestValidator := func(cfg *config.AzureSEVSNP, validateTokenErr error) *Validator {
validator := NewValidator(cfg, logger.NewTest(t))
validator := NewValidator(cfg, slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)))
validator.maa = &stubMaaValidator{
validateTokenErr: validateTokenErr,
}
@ -644,7 +644,7 @@ func TestTrustedKeyFromSNP(t *testing.T) {
validator := &Validator{
hclValidator: &stubAttestationKey{},
config: defaultCfg,
log: logger.NewTest(t),
log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)),
getter: tc.getter,
attestationVerifier: tc.verifier,
attestationValidator: tc.validator,

View file

@ -14,6 +14,7 @@ import (
"crypto/x509"
"crypto/x509/pkix"
"io"
"log/slog"
"net/http"
"os"
"testing"
@ -189,7 +190,7 @@ func TestGetAttestationCert(t *testing.T) {
))
require.NoError(tpm2.NVWrite(tpm, tpm2.HandleOwner, tpmAkCertIdx, "", akCert, 0))
issuer := NewIssuer(logger.NewTest(t))
issuer := NewIssuer(slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)))
issuer.hClient = newTestClient(tc.crlServer)
certs, err := issuer.getAttestationCert(context.Background(), tpm, nil)

View file

@ -266,7 +266,7 @@ func TestAttestationWithCerts(t *testing.T) {
}
defer trust.ClearProductCertCache()
att, err := instanceInfo.AttestationWithCerts(tc.getter, tc.fallbackCerts, logger.NewTest(t))
att, err := instanceInfo.AttestationWithCerts(tc.getter, tc.fallbackCerts, slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)))
if tc.wantErr {
assert.Error(err)
} else {

Some files were not shown because too many files have changed in this diff.