fixed some general problems

imports, wrong logging calls and similar stuff. Unit tests work now
miampf 2024-01-10 14:24:30 +01:00
parent c1168588e6
commit 69cbd155db
No known key found for this signature in database
GPG key ID: 376EAC0E5307A669
99 changed files with 342 additions and 329 deletions
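
Most hunks below make the same two kinds of change: import blocks are adjusted, and printf-style calls on the old internal logger (Infof, Debugf, Warnf) are rewritten against *slog.Logger, which has no format variants. A minimal, self-contained sketch of that call-site pattern follows; the handler setup, endpoint value, and error are placeholders for illustration and are not taken from the diff:

package main

import (
	"fmt"
	"log/slog"
	"os"
)

func main() {
	// slog has no Infof/Debugf/Warnf, so format strings either move into
	// fmt.Sprintf or are replaced by structured attributes.
	log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug}))

	endpoint := "192.0.2.1" // placeholder value
	log.Info(fmt.Sprintf("Deploying on %v", endpoint)) // was: log.Infof("Deploying on %v", endpoint)

	// Structured form used by some call sites in the diff:
	err := fmt.Errorf("placeholder error")
	log.With(slog.Any("error", err)).Error("Failed to override service unit")
}

Note that where a hunk leaves format verbs inside a plain slog call (e.g. Debug("Measurements: %#v\n", ...)), the verbs are no longer interpreted; slog treats the trailing arguments as structured key-value attributes.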

View file

@@ -7,10 +7,11 @@ SPDX-License-Identifier: AGPL-3.0-only
 package main

 import (
 	"context"
+	"fmt"
 	"log/slog"
 	"net"
 	"os"

 	"github.com/edgelesssys/constellation/v2/bootstrapper/internal/clean"
 	"github.com/edgelesssys/constellation/v2/bootstrapper/internal/diskencryption"

View file

@@ -8,9 +8,9 @@ package main

 import (
 	"context"
+	"log/slog"

 	"github.com/edgelesssys/constellation/v2/internal/cloud/metadata"
-	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/edgelesssys/constellation/v2/internal/role"
 	"github.com/edgelesssys/constellation/v2/internal/versions/components"
 	kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
@@ -22,13 +22,13 @@ type clusterFake struct{}
 // InitCluster fakes bootstrapping a new cluster with the current node being the master, returning the arguments required to join the cluster.
 func (c *clusterFake) InitCluster(
 	context.Context, string, string,
-	bool, components.Components, []string, string, *logger.Logger,
+	bool, components.Components, []string, string, *slog.Logger,
 ) ([]byte, error) {
 	return []byte{}, nil
 }

 // JoinCluster will fake joining the current node to an existing cluster.
-func (c *clusterFake) JoinCluster(context.Context, *kubeadm.BootstrapTokenDiscovery, role.Role, components.Components, *logger.Logger) error {
+func (c *clusterFake) JoinCluster(context.Context, *kubeadm.BootstrapTokenDiscovery, role.Role, components.Components, *slog.Logger) error {
 	return nil
 }

View file

@@ -18,6 +18,7 @@ import (
 	"time"

 	"log/slog"
+	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/edgelesssys/constellation/v2/bootstrapper/initproto"
 	"github.com/edgelesssys/constellation/v2/internal/atls"
 	"github.com/edgelesssys/constellation/v2/internal/attestation/variant"
@@ -25,7 +26,6 @@ import (
 	"github.com/edgelesssys/constellation/v2/internal/file"
 	kmssetup "github.com/edgelesssys/constellation/v2/internal/kms/setup"
 	"github.com/edgelesssys/constellation/v2/internal/kms/uri"
-	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/edgelesssys/constellation/v2/internal/versions/components"
 	"github.com/spf13/afero"
 	"github.com/stretchr/testify/assert"

View file

@@ -34,7 +34,6 @@ import (
 	"github.com/edgelesssys/constellation/v2/internal/cloud/metadata"
 	"github.com/edgelesssys/constellation/v2/internal/constants"
 	"github.com/edgelesssys/constellation/v2/internal/file"
-	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/edgelesssys/constellation/v2/internal/nodestate"
 	"github.com/edgelesssys/constellation/v2/internal/role"
 	"github.com/edgelesssys/constellation/v2/internal/versions/components"
@@ -174,7 +173,7 @@ func (c *JoinClient) Stop() {
 		return
 	}

-	c.log.Infof("Stopping")
+	c.log.Info("Stopping")

 	c.stopC <- struct{}{}
 	<-c.stopDone
@@ -182,7 +181,7 @@ func (c *JoinClient) Stop() {
 	c.stopC = nil
 	c.stopDone = nil

-	c.log.Infof("Stopped")
+	c.log.Info("Stopped")
 }

 func (c *JoinClient) tryJoinWithAvailableServices() error {
@@ -423,7 +422,7 @@ type ClusterJoiner interface {
 		args *kubeadm.BootstrapTokenDiscovery,
 		peerRole role.Role,
 		k8sComponents components.Components,
-		log *logger.Logger,
+		log *slog.Logger,
 	) error
 }

View file

@@ -16,13 +16,13 @@ import (
 	"testing"
 	"time"

+	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/edgelesssys/constellation/v2/internal/cloud/metadata"
 	"github.com/edgelesssys/constellation/v2/internal/constants"
 	"github.com/edgelesssys/constellation/v2/internal/file"
 	"github.com/edgelesssys/constellation/v2/internal/grpc/atlscredentials"
 	"github.com/edgelesssys/constellation/v2/internal/grpc/dialer"
 	"github.com/edgelesssys/constellation/v2/internal/grpc/testdialer"
-	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/edgelesssys/constellation/v2/internal/role"
 	"github.com/edgelesssys/constellation/v2/internal/versions/components"
 	"github.com/edgelesssys/constellation/v2/joinservice/joinproto"
@@ -402,7 +402,7 @@ type stubClusterJoiner struct {
 	joinClusterErr error
 }

-func (j *stubClusterJoiner) JoinCluster(context.Context, *kubeadm.BootstrapTokenDiscovery, role.Role, components.Components, *logger.Logger) error {
+func (j *stubClusterJoiner) JoinCluster(context.Context, *kubeadm.BootstrapTokenDiscovery, role.Role, components.Components, *slog.Logger) error {
 	j.joinClusterCalled = true
 	return j.joinClusterErr
 }

View file

@@ -177,7 +177,7 @@ func (k *KubeWrapper) InitCluster(
 		return nil, fmt.Errorf("annotating node with Kubernetes components hash: %w", err)
 	}

-	log.Infof("Setting up internal-config ConfigMap")
+	log.Info("Setting up internal-config ConfigMap")
 	if err := k.setupInternalConfigMap(ctx); err != nil {
 		return nil, fmt.Errorf("failed to setup internal ConfigMap: %w", err)
 	}
@@ -186,13 +186,13 @@ func (k *KubeWrapper) InitCluster(

 // JoinCluster joins existing Kubernetes cluster.
 func (k *KubeWrapper) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTokenDiscovery, peerRole role.Role, k8sComponents components.Components, log *slog.Logger) error {
-	log.With("k8sComponents", k8sComponents).Infof("Installing provided kubernetes components")
+	log.With("k8sComponents", k8sComponents).Info("Installing provided kubernetes components")
 	if err := k.clusterUtil.InstallComponents(ctx, k8sComponents); err != nil {
 		return fmt.Errorf("installing kubernetes components: %w", err)
 	}

 	// Step 1: retrieve cloud metadata for Kubernetes configuration
-	log.Infof("Retrieving node metadata")
+	log.Info("Retrieving node metadata")
 	instance, err := k.providerMetadata.Self(ctx)
 	if err != nil {
 		return fmt.Errorf("retrieving own instance metadata: %w", err)

View file

@@ -14,12 +14,12 @@ import (
 	"strconv"
 	"testing"

+	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/edgelesssys/constellation/v2/bootstrapper/internal/kubernetes/k8sapi"
 	"github.com/edgelesssys/constellation/v2/bootstrapper/internal/kubernetes/kubewaiter"
 	"github.com/edgelesssys/constellation/v2/internal/cloud/metadata"
 	"github.com/edgelesssys/constellation/v2/internal/constants"
 	"github.com/edgelesssys/constellation/v2/internal/kubernetes"
-	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/edgelesssys/constellation/v2/internal/role"
 	"github.com/edgelesssys/constellation/v2/internal/versions"
 	"github.com/edgelesssys/constellation/v2/internal/versions/components"

View file

@@ -18,6 +18,7 @@ import (
 	"testing"
 	"time"

+	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/edgelesssys/constellation/v2/internal/atls"
 	"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
 	"github.com/edgelesssys/constellation/v2/internal/cloud/gcpshared"
@@ -28,7 +29,6 @@ import (
 	"github.com/edgelesssys/constellation/v2/internal/constellation/state"
 	"github.com/edgelesssys/constellation/v2/internal/file"
 	"github.com/edgelesssys/constellation/v2/internal/kms/uri"
-	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/edgelesssys/constellation/v2/internal/versions"
 	"github.com/spf13/afero"
 	"github.com/spf13/pflag"

View file

@@ -152,7 +152,7 @@ func (cfm *configFetchMeasurementsCmd) configFetchMeasurements(
 			return fmt.Errorf("fetching and verifying measurements: %w", err)
 		}
 	}
-	cfm.log.Debugf("Measurements: %#v\n", fetchedMeasurements)
+	cfm.log.Debug("Measurements: %#v\n", fetchedMeasurements)

 	cfm.log.Debug("Updating measurements in configuration")
 	conf.UpdateMeasurements(fetchedMeasurements)

View file

@@ -13,6 +13,7 @@ import (
 	"net/url"
 	"testing"

+	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/edgelesssys/constellation/v2/internal/api/attestationconfigapi"
 	"github.com/edgelesssys/constellation/v2/internal/api/versionsapi"
 	"github.com/edgelesssys/constellation/v2/internal/attestation/measurements"
@@ -21,7 +22,6 @@ import (
 	"github.com/edgelesssys/constellation/v2/internal/config"
 	"github.com/edgelesssys/constellation/v2/internal/constants"
 	"github.com/edgelesssys/constellation/v2/internal/file"
-	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/spf13/afero"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"

View file

@@ -77,7 +77,6 @@ func runConfigGenerate(cmd *cobra.Command, args []string) error {
 	if err != nil {
 		return fmt.Errorf("creating logger: %w", err)
 	}
-	defer log.Sync()

 	fileHandler := file.NewHandler(afero.NewOsFs())
 	provider := cloudprovider.FromString(args[0])
@@ -86,13 +85,13 @@ func runConfigGenerate(cmd *cobra.Command, args []string) error {
 	if err := cg.flags.parse(cmd.Flags()); err != nil {
 		return fmt.Errorf("parsing flags: %w", err)
 	}
-	log.Debugf("Parsed flags as %+v", cg.flags)
+	log.Debug("Parsed flags as %+v", cg.flags)

 	return cg.configGenerate(cmd, fileHandler, provider, args[0])
 }

 func (cg *configGenerateCmd) configGenerate(cmd *cobra.Command, fileHandler file.Handler, provider cloudprovider.Provider, rawProvider string) error {
-	cg.log.Debugf("Using cloud provider %s", provider.String())
+	cg.log.Debug("Using cloud provider %s", provider.String())

 	// Config creation
 	conf, err := createConfigWithAttestationVariant(provider, rawProvider, cg.flags.attestationVariant)
@@ -100,7 +99,7 @@ func (cg *configGenerateCmd) configGenerate(cmd *cobra.Command, fileHandler file
 		return fmt.Errorf("creating config: %w", err)
 	}
 	conf.KubernetesVersion = cg.flags.k8sVersion
-	cg.log.Debugf("Writing YAML data to configuration file")
+	cg.log.Debug("Writing YAML data to configuration file")
 	if err := fileHandler.WriteYAML(constants.ConfigFilename, conf, file.OptMkdirAll); err != nil {
 		return fmt.Errorf("writing config file: %w", err)
 	}

View file

@@ -12,13 +12,13 @@ import (
 	"strings"
 	"testing"

+	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/edgelesssys/constellation/v2/internal/attestation/variant"
 	"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
 	"github.com/edgelesssys/constellation/v2/internal/config"
 	"github.com/edgelesssys/constellation/v2/internal/constants"
 	"github.com/edgelesssys/constellation/v2/internal/constellation/state"
 	"github.com/edgelesssys/constellation/v2/internal/file"
-	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/edgelesssys/constellation/v2/internal/versions"
 	"github.com/spf13/afero"
 	"github.com/stretchr/testify/assert"

View file

@@ -12,12 +12,12 @@ import (
 	"log/slog"
 	"testing"

+	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
 	"github.com/edgelesssys/constellation/v2/internal/config"
 	"github.com/edgelesssys/constellation/v2/internal/constants"
 	"github.com/edgelesssys/constellation/v2/internal/constellation/state"
 	"github.com/edgelesssys/constellation/v2/internal/file"
-	"github.com/edgelesssys/constellation/v2/internal/logger"
 	consemver "github.com/edgelesssys/constellation/v2/internal/semver"
 	"github.com/spf13/afero"
 	"github.com/stretchr/testify/assert"

View file

@@ -12,12 +12,12 @@ import (
 	"strings"
 	"testing"

+	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/edgelesssys/constellation/v2/cli/internal/cloudcmd"
 	"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
 	"github.com/edgelesssys/constellation/v2/internal/config"
 	"github.com/edgelesssys/constellation/v2/internal/constants"
 	"github.com/edgelesssys/constellation/v2/internal/file"
-	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/spf13/afero"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"

View file

@@ -10,10 +10,10 @@ import (
 	"log/slog"
 	"testing"

+	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/edgelesssys/constellation/v2/internal/cloud/gcpshared"
 	"github.com/edgelesssys/constellation/v2/internal/constants"
 	"github.com/edgelesssys/constellation/v2/internal/file"
-	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/spf13/afero"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"

View file

@@ -13,6 +13,7 @@ import (
 	"strings"
 	"testing"

+	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/edgelesssys/constellation/v2/cli/internal/terraform"
 	"github.com/edgelesssys/constellation/v2/internal/api/attestationconfigapi"
 	"github.com/edgelesssys/constellation/v2/internal/attestation/variant"
@@ -20,7 +21,6 @@ import (
 	"github.com/edgelesssys/constellation/v2/internal/config"
 	"github.com/edgelesssys/constellation/v2/internal/constants"
 	"github.com/edgelesssys/constellation/v2/internal/file"
-	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/spf13/afero"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"

View file

@@ -15,6 +15,7 @@ import (
 	"time"

 	"log/slog"
+	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/edgelesssys/constellation/v2/bootstrapper/initproto"
 	"github.com/edgelesssys/constellation/v2/cli/internal/cmd/pathprefix"
 	"github.com/edgelesssys/constellation/v2/internal/attestation/measurements"
@@ -27,7 +28,6 @@ import (
 	"github.com/edgelesssys/constellation/v2/internal/constellation/state"
 	"github.com/edgelesssys/constellation/v2/internal/file"
 	"github.com/edgelesssys/constellation/v2/internal/kms/uri"
-	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/edgelesssys/constellation/v2/internal/semver"
 	"github.com/edgelesssys/constellation/v2/internal/versions"
 	"github.com/spf13/afero"

View file

@@ -11,7 +11,7 @@ import (
 	"log/slog"
 	"testing"

 	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/spf13/cobra"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"

View file

@@ -16,6 +16,7 @@ import (
 	"testing"
 	"time"

+	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/edgelesssys/constellation/v2/disk-mapper/recoverproto"
 	"github.com/edgelesssys/constellation/v2/internal/atls"
 	"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
@@ -28,7 +29,6 @@ import (
 	"github.com/edgelesssys/constellation/v2/internal/grpc/dialer"
 	"github.com/edgelesssys/constellation/v2/internal/grpc/testdialer"
 	"github.com/edgelesssys/constellation/v2/internal/kms/uri"
-	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/spf13/afero"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"

View file

@@ -12,10 +12,10 @@ import (
 	"log/slog"
 	"testing"

+	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/edgelesssys/constellation/v2/internal/constants"
 	"github.com/edgelesssys/constellation/v2/internal/constellation/state"
 	"github.com/edgelesssys/constellation/v2/internal/file"
-	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/spf13/afero"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"

View file

@@ -12,6 +12,7 @@ import (
 	"log/slog"
 	"testing"

+	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/edgelesssys/constellation/v2/cli/internal/cloudcmd"
 	"github.com/edgelesssys/constellation/v2/internal/attestation/variant"
 	"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
@@ -22,7 +23,6 @@ import (
 	"github.com/edgelesssys/constellation/v2/internal/constellation/state"
 	"github.com/edgelesssys/constellation/v2/internal/file"
 	"github.com/edgelesssys/constellation/v2/internal/kms/uri"
-	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/edgelesssys/constellation/v2/internal/semver"
 	"github.com/edgelesssys/constellation/v2/internal/versions"
 	"github.com/spf13/afero"

View file

@@ -16,6 +16,7 @@ import (
 	"strings"
 	"testing"

+	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/edgelesssys/constellation/v2/internal/api/versionsapi"
 	"github.com/edgelesssys/constellation/v2/internal/attestation/measurements"
 	"github.com/edgelesssys/constellation/v2/internal/attestation/variant"
@@ -23,7 +24,6 @@ import (
 	"github.com/edgelesssys/constellation/v2/internal/config"
 	"github.com/edgelesssys/constellation/v2/internal/constants"
 	"github.com/edgelesssys/constellation/v2/internal/file"
-	"github.com/edgelesssys/constellation/v2/internal/logger"
 	consemver "github.com/edgelesssys/constellation/v2/internal/semver"
 	"github.com/spf13/afero"
 	"github.com/stretchr/testify/assert"

View file

@@ -20,6 +20,7 @@ import (
 	"testing"

 	"log/slog"
+	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/edgelesssys/constellation/v2/internal/atls"
 	"github.com/edgelesssys/constellation/v2/internal/attestation/measurements"
 	"github.com/edgelesssys/constellation/v2/internal/attestation/variant"
@@ -30,7 +31,6 @@ import (
 	"github.com/edgelesssys/constellation/v2/internal/file"
 	"github.com/edgelesssys/constellation/v2/internal/grpc/dialer"
 	"github.com/edgelesssys/constellation/v2/internal/grpc/testdialer"
-	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/edgelesssys/constellation/v2/verify/verifyproto"
 	tpmProto "github.com/google/go-tpm-tools/proto/tpm"
 	"github.com/spf13/afero"

View file

@@ -97,7 +97,7 @@ func main() {
 		fetcher = cloudprovider.New(qemucloud.New())
 	default:
-		log.Error("Unknown / unimplemented cloud provider CONSTEL_CSP=%v. Using fallback", csp)
+		log.Error(fmt.Sprintf("Unknown / unimplemented cloud provider CONSTEL_CSP=%v. Using fallback", csp))
 		fetcher = fallback.NewFallbackFetcher()
 	}

View file

@@ -10,7 +10,9 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"log/slog"
 	"net"
+	"os"
 	"path/filepath"
 	"strconv"
 	"sync"
@@ -60,7 +62,7 @@ func runDeploy(cmd *cobra.Command, _ []string) error {
 	if err != nil {
 		return err
 	}
-	log := logger.New(logger.PlainLog, logger.VerbosityFromInt(verbosity))
+	log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: logger.VerbosityFromInt(verbosity)}))
 	force, err := cmd.Flags().GetBool("force")
 	if err != nil {
 		return fmt.Errorf("getting force flag: %w", err)
@@ -83,7 +85,7 @@ func runDeploy(cmd *cobra.Command, _ []string) error {

 func deploy(cmd *cobra.Command, fileHandler file.Handler, constellationConfig *config.Config,
 	transfer fileTransferer,
-	log *logger.Logger,
+	log *slog.Logger,
 ) error {
 	binDir, err := cmd.Flags().GetString("bindir")
 	if err != nil {
@@ -99,13 +101,13 @@ func deploy(cmd *cobra.Command, fileHandler file.Handler, constellationConfig *c
 	}

 	if constellationConfig.IsReleaseImage() {
-		log.Infof("WARNING: Constellation image does not look like a debug image. Are you using a debug image?")
+		log.Info("WARNING: Constellation image does not look like a debug image. Are you using a debug image?")
 	}

 	if !constellationConfig.IsDebugCluster() {
-		log.Infof("WARNING: The Constellation config has debugCluster set to false.")
-		log.Infof("cdbg will likely not work unless you manually adjust the firewall / load balancing rules.")
-		log.Infof("If you create the cluster with a debug image, you should also set debugCluster to true.")
+		log.Info("WARNING: The Constellation config has debugCluster set to false.")
+		log.Info("cdbg will likely not work unless you manually adjust the firewall / load balancing rules.")
+		log.Info("If you create the cluster with a debug image, you should also set debugCluster to true.")
 	}

 	ips, err := cmd.Flags().GetStringSlice("ips")
@@ -171,14 +173,14 @@ type deployOnEndpointInput struct {
 	files    []filetransfer.FileStat
 	infos    map[string]string
 	transfer fileTransferer
-	log      *logger.Logger
+	log      *slog.Logger
 }

 // deployOnEndpoint deploys a custom built bootstrapper binary to a debugd endpoint.
 func deployOnEndpoint(ctx context.Context, in deployOnEndpointInput) error {
 	ctx, cancel := context.WithTimeout(ctx, deployEndpointTimeout)
 	defer cancel()

-	in.log.Infof("Deploying on %v", in.debugdEndpoint)
+	in.log.Info(fmt.Sprintf("Deploying on %v", in.debugdEndpoint))
 	client, closeAndWaitFn, err := newDebugdClient(ctx, in.debugdEndpoint, in.log)
 	if err != nil {
@@ -201,13 +203,13 @@ func deployOnEndpoint(ctx context.Context, in deployOnEndpointInput) error {
 type closeAndWait func()

 // newDebugdClient creates a new gRPC client for the debugd service and logs the connection state changes.
-func newDebugdClient(ctx context.Context, ip string, log *logger.Logger) (pb.DebugdClient, closeAndWait, error) {
+func newDebugdClient(ctx context.Context, ip string, log *slog.Logger) (pb.DebugdClient, closeAndWait, error) {
 	conn, err := grpc.DialContext(
 		ctx,
 		net.JoinHostPort(ip, strconv.Itoa(constants.DebugdPort)),
 		grpc.WithTransportCredentials(insecure.NewCredentials()),
-		log.GetClientUnaryInterceptor(),
-		log.GetClientStreamInterceptor(),
+		logger.GetClientUnaryInterceptor(log),
+		logger.GetClientStreamInterceptor(log),
 	)
 	if err != nil {
 		return nil, nil, fmt.Errorf("connecting to other instance via gRPC: %w", err)
@@ -221,8 +223,8 @@ func newDebugdClient(ctx context.Context, ip string, log *logger.Logger) (pb.Deb
 	return pb.NewDebugdClient(conn), closeAndWait, nil
 }

-func setInfo(ctx context.Context, log *logger.Logger, client pb.DebugdClient, infos map[string]string) error {
-	log.Infof("Setting info with length %d", len(infos))
+func setInfo(ctx context.Context, log *slog.Logger, client pb.DebugdClient, infos map[string]string) error {
+	log.Info(fmt.Sprintf("Setting info with length %d", len(infos)))

 	var infosPb []*pb.Info
 	for key, value := range infos {
@@ -238,17 +240,17 @@ func setInfo(ctx context.Context, log *logger.Logger, client pb.DebugdClient, in
 	switch status.Status {
 	case pb.SetInfoStatus_SET_INFO_SUCCESS:
-		log.Infof("Info set")
+		log.Info("Info set")
 	case pb.SetInfoStatus_SET_INFO_ALREADY_SET:
-		log.Infof("Info already set")
+		log.Info("Info already set")
 	default:
-		log.Warnf("Unknown status %v", status.Status)
+		log.Warn(fmt.Sprintf("Unknown status %v", status.Status))
 	}

 	return nil
 }

 func uploadFiles(ctx context.Context, client pb.DebugdClient, in deployOnEndpointInput) error {
-	in.log.Infof("Uploading files")
+	in.log.Info("Uploading files")

 	stream, err := client.UploadFiles(ctx, grpc.WaitForReady(true))
 	if err != nil {
@@ -266,9 +268,9 @@ func uploadFiles(ctx context.Context, client pb.DebugdClient, in deployOnEndpoin
 	}
 	switch uploadResponse.Status {
 	case pb.UploadFilesStatus_UPLOAD_FILES_SUCCESS:
-		in.log.Infof("Upload successful")
+		in.log.Info("Upload successful")
 	case pb.UploadFilesStatus_UPLOAD_FILES_ALREADY_FINISHED:
-		in.log.Infof("Files already uploaded")
+		in.log.Info("Files already uploaded")
 	case pb.UploadFilesStatus_UPLOAD_FILES_UPLOAD_FAILED:
 		return fmt.Errorf("uploading files to %v failed: %v", in.debugdEndpoint, uploadResponse)
 	case pb.UploadFilesStatus_UPLOAD_FILES_ALREADY_STARTED:

View file

@@ -110,7 +110,7 @@ func (d *Download) DownloadDeployment(ctx context.Context, ip string) error {
 			ctx, file.OverrideServiceUnit, file.TargetPath,
 		); err != nil {
 			// continue on error to allow other units to be overridden
-			d.log.With(slog.Any("error", err)).Error("Failed to override service unit %s", file.OverrideServiceUnit)
+			d.log.With(slog.Any("error", err)).Error(fmt.Sprintf("Failed to override service unit %s", file.OverrideServiceUnit))
 		}
 	}

View file

@@ -14,11 +14,11 @@ import (
 	"strconv"
 	"testing"

+	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/edgelesssys/constellation/v2/debugd/internal/filetransfer"
 	pb "github.com/edgelesssys/constellation/v2/debugd/service"
 	"github.com/edgelesssys/constellation/v2/internal/constants"
 	"github.com/edgelesssys/constellation/v2/internal/grpc/testdialer"
-	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/stretchr/testify/assert"
 	"go.uber.org/goleak"
 	"google.golang.org/grpc"

View file

@@ -135,7 +135,7 @@ func (s *ServiceManager) SystemdAction(ctx context.Context, request ServiceManag
 	switch result {
 	case "done":
-		log.Info("%s on systemd unit %s succeeded", request.Action, request.Unit)
+		log.Info(fmt.Sprintf("%s on systemd unit %s succeeded", request.Action, request.Unit))
 		return nil

 	default:
@@ -193,6 +193,6 @@ func (s *ServiceManager) OverrideServiceUnitExecStart(ctx context.Context, unitN
 		return fmt.Errorf("performing systemd unit restart: %w", err)
 	}

-	log.Info("Overrode systemd unit file execStart, performed daemon-reload and restarted unit %v", unitName)
+	log.Info(fmt.Sprintf("Overrode systemd unit file execStart, performed daemon-reload and restarted unit %v", unitName))
 	return nil
 }

View file

@@ -14,7 +14,7 @@ import (
 	"sync"
 	"testing"

 	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/spf13/afero"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"

View file

@@ -74,7 +74,7 @@ func NewStartTrigger(ctx context.Context, wg *sync.WaitGroup, provider cloudprov
 			return
 		}

-		logger.Info("Getting logstash pipeline template from image %s", versions.LogstashImage)
+		logger.Info(fmt.Sprintf("Getting logstash pipeline template from image %s", versions.LogstashImage))
 		tmpl, err := getTemplate(ctx, logger, versions.LogstashImage, "/run/logstash/templates/pipeline.conf", "/run/logstash")
 		if err != nil {
 			logger.Error("Getting logstash pipeline template: %v", err)
@@ -101,7 +101,7 @@ func NewStartTrigger(ctx context.Context, wg *sync.WaitGroup, provider cloudprov
 			return
 		}

-		logger.Info("Getting filebeat config template from image %s", versions.FilebeatImage)
+		logger.Info(fmt.Sprintf("Getting filebeat config template from image %s", versions.FilebeatImage))
 		tmpl, err = getTemplate(ctx, logger, versions.FilebeatImage, "/run/filebeat/templates/filebeat.yml", "/run/filebeat")
 		if err != nil {
 			logger.Error("Getting filebeat config template: %v", err)
@@ -177,7 +177,7 @@ func startPod(ctx context.Context, logger *slog.Logger) error {
 		"logcollection",
 	}
 	createPodCmd := exec.CommandContext(ctx, "podman", createPodArgs...)
-	logger.Info("Create pod command: %v", createPodCmd.String())
+	logger.Info(fmt.Sprintf("Create pod command: %v", createPodCmd.String()))
 	if out, err := createPodCmd.CombinedOutput(); err != nil {
 		return fmt.Errorf("failed to create pod: %w; output: %s", err, out)
 	}
@@ -194,7 +194,7 @@ func startPod(ctx context.Context, logger *slog.Logger) error {
 		versions.LogstashImage,
 	}
 	runLogstashCmd := exec.CommandContext(ctx, "podman", runLogstashArgs...)
-	logger.Info("Run logstash command: %v", runLogstashCmd.String())
+	logger.Info(fmt.Sprintf("Run logstash command: %v", runLogstashCmd.String()))
 	runLogstashCmd.Stdout = logstashLog
 	runLogstashCmd.Stderr = logstashLog
 	if err := runLogstashCmd.Start(); err != nil {
@@ -219,7 +219,7 @@ func startPod(ctx context.Context, logger *slog.Logger) error {
 		versions.FilebeatImage,
 	}
 	runFilebeatCmd := exec.CommandContext(ctx, "podman", runFilebeatArgs...)
-	logger.Info("Run filebeat command: %v", runFilebeatCmd.String())
+	logger.Info(fmt.Sprintf("Run filebeat command: %v", runFilebeatCmd.String()))
 	runFilebeatCmd.Stdout = filebeatLog
 	runFilebeatCmd.Stderr = filebeatLog
 	if err := runFilebeatCmd.Start(); err != nil {
@@ -304,7 +304,7 @@ type cmdLogger struct {
 }

 func (c *cmdLogger) Write(p []byte) (n int, err error) {
-	c.logger.Info("%s", p)
+	c.logger.Info(fmt.Sprintf("%s", p))
 	return len(p), nil
 }

View file

@@ -12,8 +12,9 @@ import (
 	"sync"
 	"testing"
 	"time"
+	"log/slog"

 	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/stretchr/testify/assert"
 	"go.uber.org/goleak"
 )

View file

@@ -13,14 +13,15 @@ import (
 	"net"
 	"strconv"
 	"testing"
+	"log/slog"

+	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/edgelesssys/constellation/v2/debugd/internal/debugd/deploy"
 	"github.com/edgelesssys/constellation/v2/debugd/internal/debugd/info"
 	"github.com/edgelesssys/constellation/v2/debugd/internal/filetransfer"
 	pb "github.com/edgelesssys/constellation/v2/debugd/service"
 	"github.com/edgelesssys/constellation/v2/internal/constants"
 	"github.com/edgelesssys/constellation/v2/internal/grpc/testdialer"
-	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"go.uber.org/goleak"

View file

@@ -9,6 +9,7 @@ SPDX-License-Identifier: AGPL-3.0-only
 package filetransfer

 import (
+	"fmt"
 	"errors"
 	"io"
 	"io/fs"
@@ -19,7 +20,6 @@ import (
 	"github.com/edgelesssys/constellation/v2/debugd/internal/debugd"
 	"github.com/edgelesssys/constellation/v2/debugd/internal/filetransfer/streamer"
 	pb "github.com/edgelesssys/constellation/v2/debugd/service"
-	"go.uber.org/zap"
 )

 // RecvFilesStream is a stream that receives FileTransferMessages.
@@ -146,7 +146,7 @@ func (s *FileTransferer) handleFileRecv(stream RecvFilesStream) (bool, error) {
 	if header == nil {
 		return false, errors.New("first message must be a header message")
 	}
-	s.log.Info("Starting file receive of %q", header.TargetPath)
+	s.log.Info(fmt.Sprintf("Starting file receive of %q", header.TargetPath))
 	s.addFile(FileStat{
 		SourcePath: header.TargetPath,
 		TargetPath: header.TargetPath,
@@ -160,10 +160,10 @@ func (s *FileTransferer) handleFileRecv(stream RecvFilesStream) (bool, error) {
 	})

 	recvChunkStream := &recvChunkStream{stream: stream}
 	if err := s.streamer.WriteStream(header.TargetPath, recvChunkStream, s.showProgress); err != nil {
-		s.log.With(slog.Any("error", err)).Error("Receive of file %q failed", header.TargetPath)
+		s.log.With(slog.Any("error", err)).Error(fmt.Sprintf("Receive of file %q failed", header.TargetPath))
 		return false, err
 	}
-	s.log.Info("Finished file receive of %q", header.TargetPath)
+	s.log.Info(fmt.Sprintf("Finished file receive of %q", header.TargetPath))
 	return false, nil
 }

View file

@@ -10,10 +10,11 @@ import (
 	"errors"
 	"io"
 	"testing"
+	"log/slog"

+	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/edgelesssys/constellation/v2/debugd/internal/filetransfer/streamer"
 	pb "github.com/edgelesssys/constellation/v2/debugd/service"
-	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"go.uber.org/goleak"

View file

@@ -14,6 +14,7 @@ import (
 	"net"
 	"os"
 	"path/filepath"
+	"fmt"

 	"github.com/edgelesssys/constellation/v2/disk-mapper/internal/diskencryption"
 	"github.com/edgelesssys/constellation/v2/disk-mapper/internal/recoveryserver"
@@ -124,7 +125,7 @@ func main() {
 		metadataClient = qemucloud.New()
 	default:
-		log.Error("CSP %s is not supported by Constellation", *csp)
+		log.Error(fmt.Sprintf("CSP %s is not supported by Constellation", *csp))
 		os.Exit(1)
 	}

View file

@@ -14,13 +14,13 @@ import (
 	"testing"
 	"time"

+	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/edgelesssys/constellation/v2/disk-mapper/recoverproto"
 	"github.com/edgelesssys/constellation/v2/internal/atls"
 	"github.com/edgelesssys/constellation/v2/internal/attestation/variant"
 	"github.com/edgelesssys/constellation/v2/internal/grpc/dialer"
 	"github.com/edgelesssys/constellation/v2/internal/grpc/testdialer"
 	"github.com/edgelesssys/constellation/v2/internal/kms/kms"
-	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"go.uber.org/goleak"

View file

@@ -16,12 +16,12 @@ import (
 	"testing"
 	"time"

+	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/edgelesssys/constellation/v2/internal/cloud/metadata"
 	"github.com/edgelesssys/constellation/v2/internal/constants"
 	"github.com/edgelesssys/constellation/v2/internal/grpc/atlscredentials"
 	"github.com/edgelesssys/constellation/v2/internal/grpc/dialer"
 	"github.com/edgelesssys/constellation/v2/internal/grpc/testdialer"
-	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/edgelesssys/constellation/v2/internal/role"
 	"github.com/edgelesssys/constellation/v2/joinservice/joinproto"
 	"github.com/stretchr/testify/assert"

View file

@@ -196,7 +196,7 @@ func (s *Manager) LogDevices() error {
 		var stat syscall.Statfs_t
 		dev := "/dev/" + device.Name()
 		if err := syscall.Statfs(dev, &stat); err != nil {
-			s.log.With(slog.Any("error", err)).Error("failed to statfs %s", dev)
+			s.log.With(slog.Any("error", err)).Error(fmt.Sprintf("failed to statfs %s", dev))
 			continue
 		}
@@ -205,7 +205,7 @@ func (s *Manager) LogDevices() error {
 		free := stat.Bfree * uint64(stat.Bsize)
 		avail := stat.Bavail * uint64(stat.Bsize)

-		s.log.Info(
+		s.log.Info(fmt.Sprintf(
 			"Name: %-15s, Size: %-10d, Mode: %s, ModTime: %s, Size = %-10d, Free = %-10d, Available = %-10d\n",
 			dev,
 			device.Size(),
@@ -213,7 +213,7 @@ func (s *Manager) LogDevices() error {
 			device.ModTime(),
 			size,
 			free,
-			avail)
+			avail))
 	}
 	return nil
 }

View file

@@ -17,10 +17,10 @@ import (
 	"sync"
 	"testing"

+	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/edgelesssys/constellation/v2/internal/attestation/vtpm"
 	"github.com/edgelesssys/constellation/v2/internal/crypto"
 	"github.com/edgelesssys/constellation/v2/internal/file"
-	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/edgelesssys/constellation/v2/internal/nodestate"
 	"github.com/spf13/afero"
 	"github.com/stretchr/testify/assert"

View file

@@ -103,7 +103,7 @@ func TestMapper(t *testing.T) {
 	require.NoError(setup(1), "failed to setup test disk")
 	defer func() { require.NoError(teardown(), "failed to delete test disk") }()

-	mapper, free, err := diskencryption.New(devicePath, slog.New(slog.NewPlainTextHandler(logger.TestWriter{T: t}, nil))
+	mapper, free, err := diskencryption.New(devicePath, slog.New(slog.NewPlainTextHandler(os.Stderr, nil))
 	require.NoError(err, "failed to initialize crypt device")
 	defer free()

View file

@@ -11,6 +11,7 @@ import (
 	"errors"
 	"log/slog"
 	"os"
+	"fmt"

 	"github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/bazelfiles"
 	"github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/issues"
@@ -39,7 +40,7 @@ func runCheck(cmd *cobra.Command, _ []string) error {
 		return err
 	}
 	log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel}))
-	log.Debug("Parsed flags: %+v", flags)
+	log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))

 	filesHelper, err := bazelfiles.New()
 	if err != nil {
@@ -88,7 +89,7 @@ func runCheck(cmd *cobra.Command, _ []string) error {
 }

 func checkBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorCheck mirrorChecker, bazelFile bazelfiles.BazelFile, log *slog.Logger) (issByFile issues.ByFile, err error) {
-	log.Debug("Checking file: %s", bazelFile.RelPath)
+	log.Debug(fmt.Sprintf("Checking file: %s", bazelFile.RelPath))
 	issByFile = issues.NewByFile()
 	buildfile, err := fileHelper.LoadFile(bazelFile)
 	if err != nil {
@@ -96,12 +97,12 @@ func checkBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorCh
 	}
 	found := rules.Rules(buildfile, rules.SupportedRules)
 	if len(found) == 0 {
-		log.Debug("No rules found in file: %s", bazelFile.RelPath)
+		log.Debug(fmt.Sprintf("No rules found in file: %s", bazelFile.RelPath))
 		return issByFile, nil
 	}
-	log.Debug("Found %d rules in file: %s", len(found), bazelFile.RelPath)
+	log.Debug(fmt.Sprintf("Found %d rules in file: %s", len(found), bazelFile.RelPath))
 	for _, rule := range found {
-		log.Debug("Checking rule: %s", rule.Name())
+		log.Debug(fmt.Sprintf("Checking rule: %s", rule.Name()))
 		// check if the rule is a valid pinned dependency rule (has all required attributes)
 		if issues := rules.ValidatePinned(rule); len(issues) > 0 {
 			issByFile.Add(rule.Name(), issues...)

View file

@@ -11,6 +11,7 @@ import (
 	"errors"
 	"log/slog"
 	"os"
+	"fmt"

 	"github.com/bazelbuild/buildtools/build"
 	"github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/bazelfiles"
@@ -39,7 +40,7 @@ func runFix(cmd *cobra.Command, _ []string) error {
 		return err
 	}
 	log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel}))
-	log.Debug("Parsed flags: %+v", flags)
+	log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))

 	fileHelper, err := bazelfiles.New()
 	if err != nil {
@@ -76,7 +77,7 @@ func runFix(cmd *cobra.Command, _ []string) error {
 		}
 	}
 	if len(issues) > 0 {
-		log.Warn("Found %d unfixable issues in rules", len(issues))
+		log.Warn(fmt.Sprintf("Found %d unfixable issues in rules", len(issues)))
 		issues.Report(cmd.OutOrStdout())
 		return errors.New("found issues in rules")
 	}
@@ -88,17 +89,17 @@ func runFix(cmd *cobra.Command, _ []string) error {
 func fixBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorUpload mirrorUploader, bazelFile bazelfiles.BazelFile, dryRun bool, log *slog.Logger) (iss issues.ByFile, err error) {
 	iss = issues.NewByFile()
 	var changed bool // true if any rule in this file was changed
-	log.Info("Checking file: %s", bazelFile.RelPath)
+	log.Info(fmt.Sprintf("Checking file: %s", bazelFile.RelPath))
 	buildfile, err := fileHelper.LoadFile(bazelFile)
 	if err != nil {
 		return iss, err
 	}
 	found := rules.Rules(buildfile, rules.SupportedRules)
 	if len(found) == 0 {
-		log.Debug("No rules found in file: %s", bazelFile.RelPath)
+		log.Debug(fmt.Sprintf("No rules found in file: %s", bazelFile.RelPath))
 		return iss, nil
 	}
-	log.Debug("Found %d rules in file: %s", len(found), bazelFile.RelPath)
+	log.Debug(fmt.Sprintf("Found %d rules in file: %s", len(found), bazelFile.RelPath))
 	for _, rule := range found {
 		changedRule, ruleIssues := fixRule(ctx, mirrorUpload, rule, log)
 		if len(ruleIssues) > 0 {
@@ -108,11 +109,11 @@ func fixBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorUplo
 	}

 	if len(iss) > 0 {
-		log.Warn("File %s has issues. Not saving!", bazelFile.RelPath)
+		log.Warn(fmt.Sprintf("File %s has issues. Not saving!", bazelFile.RelPath))
 		return iss, nil
 	}
 	if !changed {
-		log.Debug("No changes to file: %s", bazelFile.RelPath)
+		log.Debug(fmt.Sprintf("No changes to file: %s", bazelFile.RelPath))
 		return iss, nil
 	}
 	if dryRun {
@@ -123,7 +124,7 @@ func fixBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorUplo
 		log.Info("Dry run: would save updated file %s with diff:\n%s", bazelFile.RelPath, diff)
 		return iss, nil
 	}
-	log.Info("Saving updated file: %s", bazelFile.RelPath)
+	log.Info(fmt.Sprintf("Saving updated file: %s", bazelFile.RelPath))
 	if err := fileHelper.WriteFile(bazelFile, buildfile); err != nil {
 		return iss, err
 	}
@@ -146,7 +147,7 @@ func learnHashForRule(ctx context.Context, mirrorUpload mirrorUploader, rule *bu
 }

 func fixRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.Rule, log *slog.Logger) (changed bool, iss []error) {
-	log.Debug("Fixing rule: %s", rule.Name())
+	log.Debug(fmt.Sprintf("Fixing rule: %s", rule.Name()))

 	// try to learn the hash
 	if hash, err := rules.GetHash(rule); err != nil || hash == "" {

View file

@ -15,6 +15,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"log/slog"
"net/http" "net/http"
"net/url" "net/url"
"path" "path"
@ -23,7 +24,6 @@ import (
s3manager "github.com/aws/aws-sdk-go-v2/feature/s3/manager" s3manager "github.com/aws/aws-sdk-go-v2/feature/s3/manager"
"github.com/aws/aws-sdk-go-v2/service/s3" "github.com/aws/aws-sdk-go-v2/service/s3"
s3types "github.com/aws/aws-sdk-go-v2/service/s3/types" s3types "github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/edgelesssys/constellation/v2/internal/logger"
) )
// Maintainer can upload and download files to and from a CAS mirror. // Maintainer can upload and download files to and from a CAS mirror.
@ -39,11 +39,11 @@ type Maintainer struct {
unauthenticated bool unauthenticated bool
dryRun bool dryRun bool
log *logger.Logger log *slog.Logger
} }
// NewUnauthenticated creates a new Maintainer that does not require authentication and can only download files from a CAS mirror. // NewUnauthenticated creates a new Maintainer that does not require authentication and can only download files from a CAS mirror.
func NewUnauthenticated(mirrorBaseURL string, dryRun bool, log *logger.Logger) *Maintainer { func NewUnauthenticated(mirrorBaseURL string, dryRun bool, log *slog.Logger) *Maintainer {
return &Maintainer{ return &Maintainer{
httpClient: http.DefaultClient, httpClient: http.DefaultClient,
mirrorBaseURL: mirrorBaseURL, mirrorBaseURL: mirrorBaseURL,
@ -54,7 +54,7 @@ func NewUnauthenticated(mirrorBaseURL string, dryRun bool, log *logger.Logger) *
} }
// New creates a new Maintainer that can upload and download files to and from a CAS mirror. // New creates a new Maintainer that can upload and download files to and from a CAS mirror.
func New(ctx context.Context, region, bucket, mirrorBaseURL string, dryRun bool, log *logger.Logger) (*Maintainer, error) { func New(ctx context.Context, region, bucket, mirrorBaseURL string, dryRun bool, log *slog.Logger) (*Maintainer, error) {
cfg, err := awsconfig.LoadDefaultConfig(ctx, awsconfig.WithRegion(region)) cfg, err := awsconfig.LoadDefaultConfig(ctx, awsconfig.WithRegion(region))
if err != nil { if err != nil {
return nil, err return nil, err
@ -95,17 +95,17 @@ func (m *Maintainer) Mirror(ctx context.Context, hash string, urls []string) err
} }
for _, url := range urls { for _, url := range urls {
m.log.Debugf("Mirroring file with hash %v from %q", hash, url) m.log.Debug("Mirroring file with hash %v from %q", hash, url)
body, err := m.downloadFromUpstream(ctx, url) body, err := m.downloadFromUpstream(ctx, url)
if err != nil { if err != nil {
m.log.Debugf("Failed to download file from %q: %v", url, err) m.log.Debug("Failed to download file from %q: %v", url, err)
continue continue
} }
defer body.Close() defer body.Close()
streamedHash := sha256.New() streamedHash := sha256.New()
tee := io.TeeReader(body, streamedHash) tee := io.TeeReader(body, streamedHash)
if err := m.put(ctx, hash, tee); err != nil { if err := m.put(ctx, hash, tee); err != nil {
m.log.Warnf("Failed to stream file from upstream %q to mirror: %v.. Trying next url.", url, err) m.log.Warn("Failed to stream file from upstream %q to mirror: %v.. Trying next url.", url, err)
continue continue
} }
actualHash := hex.EncodeToString(streamedHash.Sum(nil)) actualHash := hex.EncodeToString(streamedHash.Sum(nil))
@ -117,7 +117,7 @@ func (m *Maintainer) Mirror(ctx context.Context, hash string, urls []string) err
if err != nil { if err != nil {
return err return err
} }
m.log.Debugf("File uploaded successfully to mirror from %q as %q", url, pubURL) m.log.Debug("File uploaded successfully to mirror from %q as %q", url, pubURL)
return nil return nil
} }
return fmt.Errorf("failed to download / reupload file with hash %v from any of the urls: %v", hash, urls) return fmt.Errorf("failed to download / reupload file with hash %v from any of the urls: %v", hash, urls)
@ -126,19 +126,19 @@ func (m *Maintainer) Mirror(ctx context.Context, hash string, urls []string) err
// Learn downloads a file from one of the existing (non-mirror) urls, hashes it and returns the hash. // Learn downloads a file from one of the existing (non-mirror) urls, hashes it and returns the hash.
func (m *Maintainer) Learn(ctx context.Context, urls []string) (string, error) { func (m *Maintainer) Learn(ctx context.Context, urls []string) (string, error) {
for _, url := range urls { for _, url := range urls {
m.log.Debugf("Learning new hash from %q", url) m.log.Debug(fmt.Sprintf("Learning new hash from %q", url))
body, err := m.downloadFromUpstream(ctx, url) body, err := m.downloadFromUpstream(ctx, url)
if err != nil { if err != nil {
m.log.Debugf("Failed to download file from %q: %v", url, err) m.log.Debug("Failed to download file from %q: %v", url, err)
continue continue
} }
defer body.Close() defer body.Close()
streamedHash := sha256.New() streamedHash := sha256.New()
if _, err := io.Copy(streamedHash, body); err != nil { if _, err := io.Copy(streamedHash, body); err != nil {
m.log.Debugf("Failed to stream file from %q: %v", url, err) m.log.Debug("Failed to stream file from %q: %v", url, err)
} }
learnedHash := hex.EncodeToString(streamedHash.Sum(nil)) learnedHash := hex.EncodeToString(streamedHash.Sum(nil))
m.log.Debugf("File successfully downloaded from %q with %q", url, learnedHash) m.log.Debug("File successfully downloaded from %q with %q", url, learnedHash)
return learnedHash, nil return learnedHash, nil
} }
return "", fmt.Errorf("failed to download file / learn hash from any of the urls: %v", urls) return "", fmt.Errorf("failed to download file / learn hash from any of the urls: %v", urls)
@ -146,7 +146,7 @@ func (m *Maintainer) Learn(ctx context.Context, urls []string) (string, error) {
// Check checks if a file is present and has the correct hash in the CAS mirror. // Check checks if a file is present and has the correct hash in the CAS mirror.
func (m *Maintainer) Check(ctx context.Context, expectedHash string) error { func (m *Maintainer) Check(ctx context.Context, expectedHash string) error {
m.log.Debugf("Checking consistency of object with hash %v", expectedHash) m.log.Debug(fmt.Sprintf("Checking consistency of object with hash %v", expectedHash))
if m.unauthenticated { if m.unauthenticated {
return m.checkUnauthenticated(ctx, expectedHash) return m.checkUnauthenticated(ctx, expectedHash)
} }
@ -157,7 +157,7 @@ func (m *Maintainer) Check(ctx context.Context, expectedHash string) error {
// It uses the authenticated CAS s3 endpoint to download the file metadata. // It uses the authenticated CAS s3 endpoint to download the file metadata.
func (m *Maintainer) checkAuthenticated(ctx context.Context, expectedHash string) error { func (m *Maintainer) checkAuthenticated(ctx context.Context, expectedHash string) error {
key := path.Join(keyBase, expectedHash) key := path.Join(keyBase, expectedHash)
m.log.Debugf("Check: s3 getObjectAttributes {Bucket: %v, Key: %v}", m.bucket, key) m.log.Debug("Check: s3 getObjectAttributes {Bucket: %v, Key: %v}", m.bucket, key)
attributes, err := m.objectStorageClient.GetObjectAttributes(ctx, &s3.GetObjectAttributesInput{ attributes, err := m.objectStorageClient.GetObjectAttributes(ctx, &s3.GetObjectAttributesInput{
Bucket: &m.bucket, Bucket: &m.bucket,
Key: &key, Key: &key,
@ -174,7 +174,7 @@ func (m *Maintainer) checkAuthenticated(ctx context.Context, expectedHash string
// checksums are not guaranteed to be present // checksums are not guaranteed to be present
// and if present, they are only meaningful for single part objects // and if present, they are only meaningful for single part objects
// fallback if checksum cannot be verified from attributes // fallback if checksum cannot be verified from attributes
m.log.Debugf("S3 object attributes cannot be used to verify key %v. Falling back to download.", key) m.log.Debug(fmt.Sprintf("S3 object attributes cannot be used to verify key %v. Falling back to download.", key))
return m.checkUnauthenticated(ctx, expectedHash) return m.checkUnauthenticated(ctx, expectedHash)
} }
@ -192,7 +192,7 @@ func (m *Maintainer) checkUnauthenticated(ctx context.Context, expectedHash stri
if err != nil { if err != nil {
return err return err
} }
m.log.Debugf("Check: http get {Url: %v}", pubURL) m.log.Debug(fmt.Sprintf("Check: http get {Url: %v}", pubURL))
req, err := http.NewRequestWithContext(ctx, http.MethodGet, pubURL, http.NoBody) req, err := http.NewRequestWithContext(ctx, http.MethodGet, pubURL, http.NoBody)
if err != nil { if err != nil {
return err return err
@ -221,10 +221,10 @@ func (m *Maintainer) put(ctx context.Context, hash string, data io.Reader) error
key := path.Join(keyBase, hash) key := path.Join(keyBase, hash)
if m.dryRun { if m.dryRun {
m.log.Debugf("DryRun: s3 put object {Bucket: %v, Key: %v}", m.bucket, key) m.log.Debug("DryRun: s3 put object {Bucket: %v, Key: %v}", m.bucket, key)
return nil return nil
} }
m.log.Debugf("Uploading object with hash %v to s3://%v/%v", hash, m.bucket, key) m.log.Debug(fmt.Sprintf("Uploading object with hash %v to s3://%v/%v", hash, m.bucket, key))
_, err := m.uploadClient.Upload(ctx, &s3.PutObjectInput{ _, err := m.uploadClient.Upload(ctx, &s3.PutObjectInput{
Bucket: &m.bucket, Bucket: &m.bucket,
Key: &key, Key: &key,
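For reference on the calls changed in this file: log/slog does not expand printf verbs in its message argument, so a format string must either be rendered eagerly with fmt.Sprintf or replaced by key-value attributes. A minimal sketch of both patterns, assuming a *slog.Logger named log and placeholder values url and err (not taken from this file):

package main

import (
	"fmt"
	"log/slog"
	"os"
)

func main() {
	// Handler configured for debug output, matching the constructions used elsewhere in this commit.
	log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug}))
	url := "https://example.com/artifact"
	err := fmt.Errorf("connection reset")

	// Pattern 1: keep the printf wording, render it before handing it to slog.
	log.Debug(fmt.Sprintf("Failed to download file from %q: %v", url, err))

	// Pattern 2: native slog style, message plus alternating key-value attributes.
	log.Debug("failed to download file", "url", url, "error", err)
}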

View file

@ -13,11 +13,12 @@ import (
"log" "log"
"net/http" "net/http"
"testing" "testing"
"log/slog"
"github.com/edgelesssys/constellation/v2/internal/logger"
s3manager "github.com/aws/aws-sdk-go-v2/feature/s3/manager" s3manager "github.com/aws/aws-sdk-go-v2/feature/s3/manager"
"github.com/aws/aws-sdk-go-v2/service/s3" "github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"go.uber.org/goleak" "go.uber.org/goleak"

View file

@ -11,6 +11,7 @@ import (
"errors" "errors"
"log/slog" "log/slog"
"os" "os"
"fmt"
"github.com/bazelbuild/buildtools/build" "github.com/bazelbuild/buildtools/build"
"github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/bazelfiles" "github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/bazelfiles"
@ -39,7 +40,7 @@ func runUpgrade(cmd *cobra.Command, _ []string) error {
return err return err
} }
log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel})) log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel}))
log.Debug("Parsed flags: %+v", flags) log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
fileHelper, err := bazelfiles.New() fileHelper, err := bazelfiles.New()
if err != nil { if err != nil {
@ -76,7 +77,7 @@ func runUpgrade(cmd *cobra.Command, _ []string) error {
} }
} }
if len(issues) > 0 { if len(issues) > 0 {
log.Warn("Found %d issues in rules", len(issues)) log.Warn(fmt.Sprintf("Found %d issues in rules", len(issues)))
issues.Report(cmd.OutOrStdout()) issues.Report(cmd.OutOrStdout())
return errors.New("found issues in rules") return errors.New("found issues in rules")
} }
@ -88,17 +89,17 @@ func runUpgrade(cmd *cobra.Command, _ []string) error {
func upgradeBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorUpload mirrorUploader, bazelFile bazelfiles.BazelFile, dryRun bool, log *slog.Logger) (iss issues.ByFile, err error) { func upgradeBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorUpload mirrorUploader, bazelFile bazelfiles.BazelFile, dryRun bool, log *slog.Logger) (iss issues.ByFile, err error) {
iss = issues.NewByFile() iss = issues.NewByFile()
var changed bool // true if any rule in this file was changed var changed bool // true if any rule in this file was changed
log.Info("Checking file: %s", bazelFile.RelPath) log.Info(fmt.Sprintf("Checking file: %s", bazelFile.RelPath))
buildfile, err := fileHelper.LoadFile(bazelFile) buildfile, err := fileHelper.LoadFile(bazelFile)
if err != nil { if err != nil {
return iss, err return iss, err
} }
found := rules.Rules(buildfile, rules.SupportedRules) found := rules.Rules(buildfile, rules.SupportedRules)
if len(found) == 0 { if len(found) == 0 {
log.Debug("No rules found in file: %s", bazelFile.RelPath) log.Debug(fmt.Sprintf("No rules found in file: %s", bazelFile.RelPath))
return iss, nil return iss, nil
} }
log.Debug("Found %d rules in file: %s", len(found), bazelFile.RelPath) log.Debug(fmt.Sprintf("Found %d rules in file: %s", len(found), bazelFile.RelPath))
for _, rule := range found { for _, rule := range found {
changedRule, ruleIssues := upgradeRule(ctx, mirrorUpload, rule, log) changedRule, ruleIssues := upgradeRule(ctx, mirrorUpload, rule, log)
if len(ruleIssues) > 0 { if len(ruleIssues) > 0 {
@ -108,11 +109,11 @@ func upgradeBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirror
} }
if len(iss) > 0 { if len(iss) > 0 {
log.Warn("File %s has issues. Not saving!", bazelFile.RelPath) log.Warn(fmt.Sprintf("File %s has issues. Not saving!", bazelFile.RelPath))
return iss, nil return iss, nil
} }
if !changed { if !changed {
log.Debug("No changes to file: %s", bazelFile.RelPath) log.Debug(fmt.Sprintf("No changes to file: %s", bazelFile.RelPath))
return iss, nil return iss, nil
} }
if dryRun { if dryRun {
@ -123,7 +124,7 @@ func upgradeBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirror
log.Info("Dry run: would save updated file %s with diff:\n%s", bazelFile.RelPath, diff) log.Info("Dry run: would save updated file %s with diff:\n%s", bazelFile.RelPath, diff)
return iss, nil return iss, nil
} }
log.Info("Saving updated file: %s", bazelFile.RelPath) log.Info(fmt.Sprintf("Saving updated file: %s", bazelFile.RelPath))
if err := fileHelper.WriteFile(bazelFile, buildfile); err != nil { if err := fileHelper.WriteFile(bazelFile, buildfile); err != nil {
return iss, err return iss, err
} }
@ -132,7 +133,7 @@ func upgradeBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirror
} }
func upgradeRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.Rule, log *slog.Logger) (changed bool, iss []error) { func upgradeRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.Rule, log *slog.Logger) (changed bool, iss []error) {
log.Debug("Upgrading rule: %s", rule.Name()) log.Debug(fmt.Sprintf("Upgrading rule: %s", rule.Name()))
upstreamURLs, err := rules.UpstreamURLs(rule) upstreamURLs, err := rules.UpstreamURLs(rule)
if errors.Is(err, rules.ErrNoUpstreamURL) { if errors.Is(err, rules.ErrNoUpstreamURL) {

View file

@ -44,14 +44,14 @@ func runCodegen(cmd *cobra.Command, _ []string) error {
return err return err
} }
log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel})) log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel}))
log.Debug("Parsed flags: %+v", flags) log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
registry, prefix, name, tag, err := splitRepoTag(flags.imageRepoTag) registry, prefix, name, tag, err := splitRepoTag(flags.imageRepoTag)
if err != nil { if err != nil {
return fmt.Errorf("splitting OCI image reference %q: %w", flags.imageRepoTag, err) return fmt.Errorf("splitting OCI image reference %q: %w", flags.imageRepoTag, err)
} }
log.Debug("Generating Go code for OCI image %s.", name) log.Debug(fmt.Sprintf("Generating Go code for OCI image %s.", name))
ociIndexPath := filepath.Join(flags.ociPath, "index.json") ociIndexPath := filepath.Join(flags.ociPath, "index.json")
index, err := os.Open(ociIndexPath) index, err := os.Open(ociIndexPath)
@ -77,7 +77,7 @@ func runCodegen(cmd *cobra.Command, _ []string) error {
return err return err
} }
log.Debug("OCI image digest: %s", digest) log.Debug(fmt.Sprintf("OCI image digest: %s", digest))
if err := inject.Render(out, inject.PinningValues{ if err := inject.Render(out, inject.PinningValues{
Package: flags.pkg, Package: flags.pkg,
@ -91,7 +91,7 @@ func runCodegen(cmd *cobra.Command, _ []string) error {
return fmt.Errorf("rendering Go code: %w", err) return fmt.Errorf("rendering Go code: %w", err)
} }
log.Debug("Go code created at %q 🤖", flags.output) log.Debug(fmt.Sprintf("Go code created at %q 🤖", flags.output))
return nil return nil
} }

View file

@ -35,9 +35,9 @@ func runMerge(cmd *cobra.Command, _ []string) error {
return err return err
} }
log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel})) log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel}))
log.Debug("Parsed flags: %+v", flags) log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
log.Debug("Merging sum file from %q into %q.", flags.inputs, flags.output) log.Debug(fmt.Sprintf("Merging sum file from %q into %q.", flags.inputs, flags.output))
var out io.Writer var out io.Writer
if flags.output == "-" { if flags.output == "-" {
@ -60,7 +60,7 @@ func runMerge(cmd *cobra.Command, _ []string) error {
return fmt.Errorf("creating merged sum file: %w", err) return fmt.Errorf("creating merged sum file: %w", err)
} }
log.Debug("Sum file created at %q 🤖", flags.output) log.Debug(fmt.Sprintf("Sum file created at %q 🤖", flags.output))
return nil return nil
} }

View file

@ -44,7 +44,7 @@ func runAWS(cmd *cobra.Command, _ []string) error {
return err return err
} }
log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel})) log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel}))
log.Debug("Parsed flags: %+v", flags) log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
archiveC, archiveCClose, err := archive.New(cmd.Context(), flags.region, flags.bucket, flags.distributionID, log) archiveC, archiveCClose, err := archive.New(cmd.Context(), flags.region, flags.bucket, flags.distributionID, log)
if err != nil { if err != nil {

View file

@ -45,7 +45,7 @@ func runAzure(cmd *cobra.Command, _ []string) error {
return err return err
} }
log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel})) log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel}))
log.Debug("Parsed flags: %+v", flags) log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
archiveC, archiveCClose, err := archive.New(cmd.Context(), flags.region, flags.bucket, flags.distributionID, log) archiveC, archiveCClose, err := archive.New(cmd.Context(), flags.region, flags.bucket, flags.distributionID, log)
if err != nil { if err != nil {

View file

@ -45,7 +45,7 @@ func runGCP(cmd *cobra.Command, _ []string) error {
return err return err
} }
log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel})) log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel}))
log.Debug("Parsed flags: %+v", flags) log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
archiveC, archiveCClose, err := archive.New(cmd.Context(), flags.region, flags.bucket, flags.distributionID, log) archiveC, archiveCClose, err := archive.New(cmd.Context(), flags.region, flags.bucket, flags.distributionID, log)
if err != nil { if err != nil {

View file

@ -14,7 +14,6 @@ import (
"github.com/edgelesssys/constellation/v2/internal/api/versionsapi" "github.com/edgelesssys/constellation/v2/internal/api/versionsapi"
"github.com/edgelesssys/constellation/v2/internal/constants" "github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/logger"
infoupload "github.com/edgelesssys/constellation/v2/internal/osimage/imageinfo" infoupload "github.com/edgelesssys/constellation/v2/internal/osimage/imageinfo"
"github.com/spf13/cobra" "github.com/spf13/cobra"
) )
@ -51,7 +50,7 @@ func runInfo(cmd *cobra.Command, args []string) error {
} }
log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel})) log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel}))
log.Debug("Parsed flags: %+v", flags) log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
info, err := readInfoArgs(args) info, err := readInfoArgs(args)
if err != nil { if err != nil {
return err return err
@ -71,7 +70,7 @@ func runInfo(cmd *cobra.Command, args []string) error {
if err != nil { if err != nil {
return fmt.Errorf("uploading image info: %w", err) return fmt.Errorf("uploading image info: %w", err)
} }
log.Info("Uploaded image info to %s", url) log.Info(fmt.Sprintf("Uploaded image info to %s", url))
return nil return nil
} }

View file

@ -54,7 +54,7 @@ func runEnvelopeMeasurements(cmd *cobra.Command, _ []string) error {
} }
log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel})) log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel}))
log.Debug("Parsed flags: %+v", flags) log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
f, err := os.Open(flags.in) f, err := os.Open(flags.in)
if err != nil { if err != nil {

View file

@ -45,7 +45,7 @@ func runMergeMeasurements(cmd *cobra.Command, args []string) error {
} }
log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel})) log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel}))
log.Debug("Parsed flags: %+v", flags) log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
mergedMeasurements, err := readMeasurementsArgs(args) mergedMeasurements, err := readMeasurementsArgs(args)
if err != nil { if err != nil {

View file

@ -12,7 +12,6 @@ import (
"os" "os"
"github.com/edgelesssys/constellation/v2/internal/constants" "github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/edgelesssys/constellation/v2/internal/osimage/measurementsuploader" "github.com/edgelesssys/constellation/v2/internal/osimage/measurementsuploader"
"github.com/spf13/cobra" "github.com/spf13/cobra"
) )
@ -54,7 +53,7 @@ func runMeasurementsUpload(cmd *cobra.Command, _ []string) error {
} }
log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel})) log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel}))
log.Debug("Parsed flags: %+v", flags) log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
uploadC, uploadCClose, err := measurementsuploader.New(cmd.Context(), flags.region, flags.bucket, flags.distributionID, log) uploadC, uploadCClose, err := measurementsuploader.New(cmd.Context(), flags.region, flags.bucket, flags.distributionID, log)
if err != nil { if err != nil {

View file

@ -31,7 +31,7 @@ func runNOP(cmd *cobra.Command, provider cloudprovider.Provider, _ []string) err
} }
flags.provider = provider flags.provider = provider
log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel})) log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel}))
log.Debug("Parsed flags: %+v", flags) log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
archiveC, archiveCClose, err := archive.New(cmd.Context(), flags.region, flags.bucket, flags.distributionID, log) archiveC, archiveCClose, err := archive.New(cmd.Context(), flags.region, flags.bucket, flags.distributionID, log)
if err != nil { if err != nil {

View file

@ -115,14 +115,14 @@ func uploadReport(ctx context.Context,
return fmt.Errorf("kind %s not supported", cfg.kind) return fmt.Errorf("kind %s not supported", cfg.kind)
} }
log.Info("Reading SNP report from file: %s", cfg.path) log.Info(fmt.Sprintf("Reading SNP report from file: %s", cfg.path))
var report verify.Report var report verify.Report
if err := fs.ReadJSON(cfg.path, &report); err != nil { if err := fs.ReadJSON(cfg.path, &report); err != nil {
return fmt.Errorf("reading snp report: %w", err) return fmt.Errorf("reading snp report: %w", err)
} }
inputVersion := convertTCBVersionToSNPVersion(report.SNPReport.LaunchTCB) inputVersion := convertTCBVersionToSNPVersion(report.SNPReport.LaunchTCB)
log.Info("Input report: %+v", inputVersion) log.Info(fmt.Sprintf("Input report: %+v", inputVersion))
latestAPIVersionAPI, err := attestationconfigapi.NewFetcherWithCustomCDNAndCosignKey(cfg.url, cfg.cosignPublicKey).FetchSEVSNPVersionLatest(ctx, attestation) latestAPIVersionAPI, err := attestationconfigapi.NewFetcherWithCustomCDNAndCosignKey(cfg.url, cfg.cosignPublicKey).FetchSEVSNPVersionLatest(ctx, attestation)
if err != nil { if err != nil {
@ -136,7 +136,7 @@ func uploadReport(ctx context.Context,
latestAPIVersion := latestAPIVersionAPI.SEVSNPVersion latestAPIVersion := latestAPIVersionAPI.SEVSNPVersion
if err := client.UploadSEVSNPVersionLatest(ctx, attestation, inputVersion, latestAPIVersion, cfg.uploadDate, cfg.force); err != nil { if err := client.UploadSEVSNPVersionLatest(ctx, attestation, inputVersion, latestAPIVersion, cfg.uploadDate, cfg.force); err != nil {
if errors.Is(err, attestationconfigapi.ErrNoNewerVersion) { if errors.Is(err, attestationconfigapi.ErrNoNewerVersion) {
log.Info("Input version: %+v is not newer than latest API version: %+v", inputVersion, latestAPIVersion) log.Info(fmt.Sprintf("Input version: %+v is not newer than latest API version: %+v", inputVersion, latestAPIVersion))
return nil return nil
} }
return fmt.Errorf("updating latest version: %w", err) return fmt.Errorf("updating latest version: %w", err)

View file

@ -131,7 +131,7 @@ func (c *Client) DeletePath(ctx context.Context, path string) error {
Bucket: &c.bucket, Bucket: &c.bucket,
Prefix: &path, Prefix: &path,
} }
c.Logger.Debug("Listing objects in %s", path) c.Logger.Debug(fmt.Sprintf("Listing objects in %s", path))
objs := []s3types.Object{} objs := []s3types.Object{}
out := &s3.ListObjectsV2Output{IsTruncated: ptr(true)} out := &s3.ListObjectsV2Output{IsTruncated: ptr(true)}
for out.IsTruncated != nil && *out.IsTruncated { for out.IsTruncated != nil && *out.IsTruncated {
@ -142,10 +142,10 @@ func (c *Client) DeletePath(ctx context.Context, path string) error {
} }
objs = append(objs, out.Contents...) objs = append(objs, out.Contents...)
} }
c.Logger.Debug("Found %d objects in %s", len(objs), path) c.Logger.Debug(fmt.Sprintf("Found %d objects in %s", len(objs), path))
if len(objs) == 0 { if len(objs) == 0 {
c.Logger.Warn("Path %s is already empty", path) c.Logger.Warn(fmt.Sprintf("Path %s is already empty", path))
return nil return nil
} }
@ -155,7 +155,7 @@ func (c *Client) DeletePath(ctx context.Context, path string) error {
} }
if c.DryRun { if c.DryRun {
c.Logger.Debug("DryRun: Deleting %d objects with IDs %v", len(objs), objIDs) c.Logger.Debug(fmt.Sprintf("DryRun: Deleting %d objects with IDs %v", len(objs), objIDs))
return nil return nil
} }
@ -167,7 +167,7 @@ func (c *Client) DeletePath(ctx context.Context, path string) error {
Objects: objIDs, Objects: objIDs,
}, },
} }
c.Logger.Debug("Deleting %d objects in %s", len(objs), path) c.Logger.Debug(fmt.Sprintf("Deleting %d objects in %s", len(objs), path))
if _, err := c.s3Client.DeleteObjects(ctx, deleteIn); err != nil { if _, err := c.s3Client.DeleteObjects(ctx, deleteIn); err != nil {
return fmt.Errorf("deleting objects in %s: %w", path, err) return fmt.Errorf("deleting objects in %s: %w", path, err)
} }

View file

@ -53,7 +53,7 @@ func runAdd(cmd *cobra.Command, _ []string) (retErr error) {
return err return err
} }
log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel})) log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel}))
log.Debug("Parsed flags: %+v", flags) log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
log.Debug("Validating flags") log.Debug("Validating flags")
if err := flags.validate(log); err != nil { if err := flags.validate(log); err != nil {
@ -93,8 +93,8 @@ func runAdd(cmd *cobra.Command, _ []string) (retErr error) {
} }
} }
log.Info("List major->minor URL: %s", ver.ListURL(versionsapi.GranularityMajor)) log.Info(fmt.Sprintf("List major->minor URL: %s", ver.ListURL(versionsapi.GranularityMajor)))
log.Info("List minor->patch URL: %s", ver.ListURL(versionsapi.GranularityMinor)) log.Info(fmt.Sprintf("List minor->patch URL: %s", ver.ListURL(versionsapi.GranularityMinor)))
return nil return nil
} }
@ -135,7 +135,7 @@ func ensureVersion(ctx context.Context, client *versionsapi.Client, kind version
return fmt.Errorf("failed to add %s version: %w", gran.String(), err) return fmt.Errorf("failed to add %s version: %w", gran.String(), err)
} }
log.Info("Added %q to list", insertVersion) log.Info(fmt.Sprintf("Added %q to list", insertVersion))
return nil return nil
} }
@ -154,11 +154,11 @@ func updateLatest(ctx context.Context, client *versionsapi.Client, kind versions
} }
if latest.Version == ver.Version() { if latest.Version == ver.Version() {
log.Info("Version %q is already latest version", ver) log.Info(fmt.Sprintf("Version %q is already latest version", ver.Version()))
return nil return nil
} }
log.Info("Setting %q as latest version", ver) log.Info(fmt.Sprintf("Setting %q as latest version", ver.Version()))
latest = versionsapi.Latest{ latest = versionsapi.Latest{
Ref: ver.Ref(), Ref: ver.Ref(),
Stream: ver.Stream(), Stream: ver.Stream(),
@ -203,7 +203,7 @@ func (f *addFlags) validate(log *slog.Logger) error {
} }
if f.release { if f.release {
log.Debug("Setting ref to %q, as release flag is set", versionsapi.ReleaseRef) log.Debug(fmt.Sprintf("Setting ref to %q, as release flag is set", versionsapi.ReleaseRef))
f.ref = versionsapi.ReleaseRef f.ref = versionsapi.ReleaseRef
} else { } else {
log.Debug("Setting latest to true, as release flag is not set") log.Debug("Setting latest to true, as release flag is not set")

View file

@ -39,7 +39,7 @@ func runLatest(cmd *cobra.Command, _ []string) (retErr error) {
return err return err
} }
log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel})) log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel}))
log.Debug("Parsed flags: %+v", flags) log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
log.Debug("Validating flags") log.Debug("Validating flags")
if err := flags.validate(); err != nil { if err := flags.validate(); err != nil {

View file

@ -44,7 +44,7 @@ func runList(cmd *cobra.Command, _ []string) (retErr error) {
return err return err
} }
log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel})) log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel}))
log.Debug("Parsed flags: %+v", flags) log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
log.Debug("Validating flags") log.Debug("Validating flags")
if err := flags.validate(); err != nil { if err := flags.validate(); err != nil {
@ -82,7 +82,7 @@ func runList(cmd *cobra.Command, _ []string) (retErr error) {
patchVersions, err := listPatchVersions(cmd.Context(), client, flags.ref, flags.stream, minorVersions) patchVersions, err := listPatchVersions(cmd.Context(), client, flags.ref, flags.stream, minorVersions)
var errNotFound *apiclient.NotFoundError var errNotFound *apiclient.NotFoundError
if err != nil && errors.As(err, &errNotFound) { if err != nil && errors.As(err, &errNotFound) {
log.Info("No patch versions found for ref %q, stream %q and minor versions %v.", flags.ref, flags.stream, minorVersions) log.Info(fmt.Sprintf("No patch versions found for ref %q, stream %q and minor versions %v.", flags.ref, flags.stream, minorVersions))
return nil return nil
} else if err != nil { } else if err != nil {
return err return err

View file

@ -75,7 +75,7 @@ func runRemove(cmd *cobra.Command, _ []string) (retErr error) {
return err return err
} }
log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel})) log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel}))
log.Debug("Parsed flags: %+v", flags) log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
log.Debug("Validating flags") log.Debug("Validating flags")
if err := flags.validate(); err != nil { if err := flags.validate(); err != nil {
@ -120,14 +120,14 @@ func runRemove(cmd *cobra.Command, _ []string) (retErr error) {
} }
if flags.all { if flags.all {
log.Info("Deleting ref %s", flags.ref) log.Info(fmt.Sprintf("Deleting ref %s", flags.ref))
if err := deleteRef(cmd.Context(), imageClients, flags.ref, flags.dryrun, log); err != nil { if err := deleteRef(cmd.Context(), imageClients, flags.ref, flags.dryrun, log); err != nil {
return fmt.Errorf("deleting ref: %w", err) return fmt.Errorf("deleting ref: %w", err)
} }
return nil return nil
} }
log.Info("Deleting single version %s", flags.ver.ShortPath()) log.Info(fmt.Sprintf("Deleting single version %s", flags.ver.ShortPath()))
if err := deleteSingleVersion(cmd.Context(), imageClients, flags.ver, flags.dryrun, log); err != nil { if err := deleteSingleVersion(cmd.Context(), imageClients, flags.ver, flags.dryrun, log); err != nil {
return fmt.Errorf("deleting single version: %w", err) return fmt.Errorf("deleting single version: %w", err)
} }
@ -138,12 +138,12 @@ func runRemove(cmd *cobra.Command, _ []string) (retErr error) {
func deleteSingleVersion(ctx context.Context, clients rmImageClients, ver versionsapi.Version, dryrun bool, log *slog.Logger) error { func deleteSingleVersion(ctx context.Context, clients rmImageClients, ver versionsapi.Version, dryrun bool, log *slog.Logger) error {
var retErr error var retErr error
log.Debug("Deleting images for %s", ver.Version) log.Debug(fmt.Sprintf("Deleting images for %s", ver.Version()))
if err := deleteImage(ctx, clients, ver, dryrun, log); err != nil { if err := deleteImage(ctx, clients, ver, dryrun, log); err != nil {
retErr = errors.Join(retErr, fmt.Errorf("deleting images: %w", err)) retErr = errors.Join(retErr, fmt.Errorf("deleting images: %w", err))
} }
log.Debug("Deleting version %s from versions API", ver.Version) log.Debug(fmt.Sprintf("Deleting version %s from versions API", ver.Version()))
if err := clients.version.DeleteVersion(ctx, ver); err != nil { if err := clients.version.DeleteVersion(ctx, ver); err != nil {
retErr = errors.Join(retErr, fmt.Errorf("deleting version from versions API: %w", err)) retErr = errors.Join(retErr, fmt.Errorf("deleting version from versions API: %w", err))
} }
@ -154,12 +154,12 @@ func deleteSingleVersion(ctx context.Context, clients rmImageClients, ver versio
func deleteRef(ctx context.Context, clients rmImageClients, ref string, dryrun bool, log *slog.Logger) error { func deleteRef(ctx context.Context, clients rmImageClients, ref string, dryrun bool, log *slog.Logger) error {
var vers []versionsapi.Version var vers []versionsapi.Version
for _, stream := range []string{"nightly", "console", "debug"} { for _, stream := range []string{"nightly", "console", "debug"} {
log.Info("Listing versions of stream %s", stream) log.Info(fmt.Sprintf("Listing versions of stream %s", stream))
minorVersions, err := listMinorVersions(ctx, clients.version, ref, stream) minorVersions, err := listMinorVersions(ctx, clients.version, ref, stream)
var notFoundErr *apiclient.NotFoundError var notFoundErr *apiclient.NotFoundError
if errors.As(err, &notFoundErr) { if errors.As(err, &notFoundErr) {
log.Debug("No minor versions found for stream %s", stream) log.Debug(fmt.Sprintf("No minor versions found for stream %s", stream))
continue continue
} else if err != nil { } else if err != nil {
return fmt.Errorf("listing minor versions for stream %s: %w", stream, err) return fmt.Errorf("listing minor versions for stream %s: %w", stream, err)
@ -167,7 +167,7 @@ func deleteRef(ctx context.Context, clients rmImageClients, ref string, dryrun b
patchVersions, err := listPatchVersions(ctx, clients.version, ref, stream, minorVersions) patchVersions, err := listPatchVersions(ctx, clients.version, ref, stream, minorVersions)
if errors.As(err, &notFoundErr) { if errors.As(err, &notFoundErr) {
log.Debug("No patch versions found for stream %s", stream) log.Debug(fmt.Sprintf("No patch versions found for stream %s", stream))
continue continue
} else if err != nil { } else if err != nil {
return fmt.Errorf("listing patch versions for stream %s: %w", stream, err) return fmt.Errorf("listing patch versions for stream %s: %w", stream, err)
@ -175,7 +175,7 @@ func deleteRef(ctx context.Context, clients rmImageClients, ref string, dryrun b
vers = append(vers, patchVersions...) vers = append(vers, patchVersions...)
} }
log.Info("Found %d versions to delete", len(vers)) log.Info(fmt.Sprintf("Found %d versions to delete", len(vers)))
var retErr error var retErr error
@ -185,7 +185,7 @@ func deleteRef(ctx context.Context, clients rmImageClients, ref string, dryrun b
} }
} }
log.Info("Deleting ref %s from versions API", ref) log.Info(fmt.Sprintf("Deleting ref %s from versions API", ref))
if err := clients.version.DeleteRef(ctx, ref); err != nil { if err := clients.version.DeleteRef(ctx, ref); err != nil {
retErr = errors.Join(retErr, fmt.Errorf("deleting ref from versions API: %w", err)) retErr = errors.Join(retErr, fmt.Errorf("deleting ref from versions API: %w", err))
} }
@ -204,7 +204,7 @@ func deleteImage(ctx context.Context, clients rmImageClients, ver versionsapi.Ve
imageInfo, err := clients.version.FetchImageInfo(ctx, imageInfo) imageInfo, err := clients.version.FetchImageInfo(ctx, imageInfo)
var notFound *apiclient.NotFoundError var notFound *apiclient.NotFoundError
if errors.As(err, &notFound) { if errors.As(err, &notFound) {
log.Warn("Image info for %s not found", ver.Version) log.Warn(fmt.Sprintf("Image info for %s not found", ver.Version()))
log.Warn("Skipping image deletion") log.Warn("Skipping image deletion")
return nil return nil
} else if err != nil { } else if err != nil {
@ -214,17 +214,17 @@ func deleteImage(ctx context.Context, clients rmImageClients, ver versionsapi.Ve
for _, entry := range imageInfo.List { for _, entry := range imageInfo.List {
switch entry.CSP { switch entry.CSP {
case "aws": case "aws":
log.Info("Deleting AWS images from %s", imageInfo.JSONPath()) log.Info(fmt.Sprintf("Deleting AWS images from %s", imageInfo.JSONPath()))
if err := clients.aws.deleteImage(ctx, entry.Reference, entry.Region, dryrun, log); err != nil { if err := clients.aws.deleteImage(ctx, entry.Reference, entry.Region, dryrun, log); err != nil {
retErr = errors.Join(retErr, fmt.Errorf("deleting AWS image %s: %w", entry.Reference, err)) retErr = errors.Join(retErr, fmt.Errorf("deleting AWS image %s: %w", entry.Reference, err))
} }
case "gcp": case "gcp":
log.Info("Deleting GCP images from %s", imageInfo.JSONPath()) log.Info(fmt.Sprintf("Deleting GCP images from %s", imageInfo.JSONPath()))
if err := clients.gcp.deleteImage(ctx, entry.Reference, dryrun, log); err != nil { if err := clients.gcp.deleteImage(ctx, entry.Reference, dryrun, log); err != nil {
retErr = errors.Join(retErr, fmt.Errorf("deleting GCP image %s: %w", entry.Reference, err)) retErr = errors.Join(retErr, fmt.Errorf("deleting GCP image %s: %w", entry.Reference, err))
} }
case "azure": case "azure":
log.Info("Deleting Azure images from %s", imageInfo.JSONPath()) log.Info(fmt.Sprintf("Deleting Azure images from %s", imageInfo.JSONPath()))
if err := clients.az.deleteImage(ctx, entry.Reference, dryrun, log); err != nil { if err := clients.az.deleteImage(ctx, entry.Reference, dryrun, log); err != nil {
retErr = errors.Join(retErr, fmt.Errorf("deleting Azure image %s: %w", entry.Reference, err)) retErr = errors.Join(retErr, fmt.Errorf("deleting Azure image %s: %w", entry.Reference, err))
} }
@ -406,7 +406,7 @@ func (a *awsClient) deleteImage(ctx context.Context, ami string, region string,
return err return err
} }
a.ec2 = ec2.NewFromConfig(cfg) a.ec2 = ec2.NewFromConfig(cfg)
log.Debug("Deleting resources in AWS region %s", region) log.Debug(fmt.Sprintf("Deleting resources in AWS region %s", region))
snapshotID, err := a.getSnapshotID(ctx, ami, log) snapshotID, err := a.getSnapshotID(ctx, ami, log)
if err != nil { if err != nil {
@ -427,7 +427,7 @@ func (a *awsClient) deleteImage(ctx context.Context, ami string, region string,
} }
func (a *awsClient) deregisterImage(ctx context.Context, ami string, dryrun bool, log *slog.Logger) error { func (a *awsClient) deregisterImage(ctx context.Context, ami string, dryrun bool, log *slog.Logger) error {
log.Debug("Deregistering image %s", ami) log.Debug(fmt.Sprintf("Deregistering image %s", ami))
deregisterReq := ec2.DeregisterImageInput{ deregisterReq := ec2.DeregisterImageInput{
ImageId: &ami, ImageId: &ami,
@ -438,7 +438,7 @@ func (a *awsClient) deregisterImage(ctx context.Context, ami string, dryrun bool
if errors.As(err, &apiErr) && if errors.As(err, &apiErr) &&
(apiErr.ErrorCode() == "InvalidAMIID.NotFound" || (apiErr.ErrorCode() == "InvalidAMIID.NotFound" ||
apiErr.ErrorCode() == "InvalidAMIID.Unavailable") { apiErr.ErrorCode() == "InvalidAMIID.Unavailable") {
log.Warn("AWS image %s not found", ami) log.Warn(fmt.Sprintf("AWS image %s not found", ami))
return nil return nil
} }
@ -446,7 +446,7 @@ func (a *awsClient) deregisterImage(ctx context.Context, ami string, dryrun bool
} }
func (a *awsClient) getSnapshotID(ctx context.Context, ami string, log *slog.Logger) (string, error) { func (a *awsClient) getSnapshotID(ctx context.Context, ami string, log *slog.Logger) (string, error) {
log.Debug("Describing image %s", ami) log.Debug(fmt.Sprintf("Describing image %s", ami))
req := ec2.DescribeImagesInput{ req := ec2.DescribeImagesInput{
ImageIds: []string{ami}, ImageIds: []string{ami},
@ -482,7 +482,7 @@ func (a *awsClient) getSnapshotID(ctx context.Context, ami string, log *slog.Log
} }
func (a *awsClient) deleteSnapshot(ctx context.Context, snapshotID string, dryrun bool, log *slog.Logger) error { func (a *awsClient) deleteSnapshot(ctx context.Context, snapshotID string, dryrun bool, log *slog.Logger) error {
log.Debug("Deleting AWS snapshot %s", snapshotID) log.Debug(fmt.Sprintf("Deleting AWS snapshot %s", snapshotID))
req := ec2.DeleteSnapshotInput{ req := ec2.DeleteSnapshotInput{
SnapshotId: &snapshotID, SnapshotId: &snapshotID,
@ -493,7 +493,7 @@ func (a *awsClient) deleteSnapshot(ctx context.Context, snapshotID string, dryru
if errors.As(err, &apiErr) && if errors.As(err, &apiErr) &&
(apiErr.ErrorCode() == "InvalidSnapshot.NotFound" || (apiErr.ErrorCode() == "InvalidSnapshot.NotFound" ||
apiErr.ErrorCode() == "InvalidSnapshot.Unavailable") { apiErr.ErrorCode() == "InvalidSnapshot.Unavailable") {
log.Warn("AWS snapshot %s not found", snapshotID) log.Warn(fmt.Sprintf("AWS snapshot %s not found", snapshotID))
return nil return nil
} }
@ -536,14 +536,14 @@ func (g *gcpClient) deleteImage(ctx context.Context, imageURI string, dryrun boo
} }
if dryrun { if dryrun {
log.Debug("DryRun: delete image request: %v", req) log.Debug(fmt.Sprintf("DryRun: delete image request: %v", req))
return nil return nil
} }
log.Debug("Deleting image %s", image) log.Debug(fmt.Sprintf("Deleting image %s", image))
op, err := g.compute.Delete(ctx, req) op, err := g.compute.Delete(ctx, req)
if err != nil && strings.Contains(err.Error(), "404") { if err != nil && strings.Contains(err.Error(), "404") {
log.Warn("GCP image %s not found", image) log.Warn(fmt.Sprintf("GCP image %s not found", image))
return nil return nil
} else if err != nil { } else if err != nil {
return fmt.Errorf("deleting image %s: %w", image, err) return fmt.Errorf("deleting image %s: %w", image, err)
@ -631,11 +631,11 @@ func (a *azureClient) deleteImage(ctx context.Context, image string, dryrun bool
} }
if dryrun { if dryrun {
log.Debug("DryRun: delete image %v", azImage) log.Debug(fmt.Sprintf("DryRun: delete image %v", azImage))
return nil return nil
} }
log.Debug("Deleting image %q, version %q", azImage.imageDefinition, azImage.version) log.Debug(fmt.Sprintf("Deleting image %q, version %q", azImage.imageDefinition, azImage.version))
poller, err := a.imageVersions.BeginDelete(ctx, azImage.resourceGroup, azImage.gallery, poller, err := a.imageVersions.BeginDelete(ctx, azImage.resourceGroup, azImage.gallery,
azImage.imageDefinition, azImage.version, nil) azImage.imageDefinition, azImage.version, nil)
if err != nil { if err != nil {
@ -647,7 +647,7 @@ func (a *azureClient) deleteImage(ctx context.Context, image string, dryrun bool
return fmt.Errorf("waiting for operation: %w", err) return fmt.Errorf("waiting for operation: %w", err)
} }
log.Debug("Checking if image definition %q still has versions left", azImage.imageDefinition) log.Debug(fmt.Sprintf("Checking if image definition %q still has versions left", azImage.imageDefinition))
pager := a.imageVersions.NewListByGalleryImagePager(azImage.resourceGroup, azImage.gallery, pager := a.imageVersions.NewListByGalleryImagePager(azImage.resourceGroup, azImage.gallery,
azImage.imageDefinition, nil) azImage.imageDefinition, nil)
for pager.More() { for pager.More() {
@ -656,14 +656,14 @@ func (a *azureClient) deleteImage(ctx context.Context, image string, dryrun bool
return fmt.Errorf("listing image versions of image definition %s: %w", azImage.imageDefinition, err) return fmt.Errorf("listing image versions of image definition %s: %w", azImage.imageDefinition, err)
} }
if len(nextResult.Value) != 0 { if len(nextResult.Value) != 0 {
log.Debug("Image definition %q still has versions left, won't be deleted", azImage.imageDefinition) log.Debug(fmt.Sprintf("Image definition %q still has versions left, won't be deleted", azImage.imageDefinition))
return nil return nil
} }
} }
time.Sleep(15 * time.Second) // Azure needs time to understand that there is no version left... time.Sleep(15 * time.Second) // Azure needs time to understand that there is no version left...
log.Debug("Deleting image definition %s", azImage.imageDefinition) log.Debug(fmt.Sprintf("Deleting image definition %s", azImage.imageDefinition))
op, err := a.image.BeginDelete(ctx, azImage.resourceGroup, azImage.gallery, azImage.imageDefinition, nil) op, err := a.image.BeginDelete(ctx, azImage.resourceGroup, azImage.gallery, azImage.imageDefinition, nil)
if err != nil { if err != nil {
return fmt.Errorf("deleting image definition %s: %w", azImage.imageDefinition, err) return fmt.Errorf("deleting image definition %s: %w", azImage.imageDefinition, err)
@ -707,10 +707,10 @@ func (a *azureClient) parseImage(ctx context.Context, image string, log *slog.Lo
imageDefinition := m[2] imageDefinition := m[2]
version := m[3] version := m[3]
log.Debug( log.Debug(fmt.Sprintf(
"Image matches community image format, gallery public name: %s, image definition: %s, version: %s", "Image matches community image format, gallery public name: %s, image definition: %s, version: %s",
galleryPublicName, imageDefinition, version, galleryPublicName, imageDefinition, version,
) ))
var galleryName string var galleryName string
pager := a.galleries.NewListPager(nil) pager := a.galleries.NewListPager(nil)
@ -725,15 +725,15 @@ func (a *azureClient) parseImage(ctx context.Context, image string, log *slog.Lo
continue continue
} }
if v.Properties.SharingProfile == nil { if v.Properties.SharingProfile == nil {
log.Debug("Skipping gallery %s with nil sharing profile", *v.Name) log.Debug(fmt.Sprintf("Skipping gallery %s with nil sharing profile", *v.Name))
continue continue
} }
if v.Properties.SharingProfile.CommunityGalleryInfo == nil { if v.Properties.SharingProfile.CommunityGalleryInfo == nil {
log.Debug("Skipping gallery %s with nil community gallery info", *v.Name) log.Debug(fmt.Sprintf("Skipping gallery %s with nil community gallery info", *v.Name))
continue continue
} }
if v.Properties.SharingProfile.CommunityGalleryInfo.PublicNames == nil { if v.Properties.SharingProfile.CommunityGalleryInfo.PublicNames == nil {
log.Debug("Skipping gallery %s with nil public names", *v.Name) log.Debug(fmt.Sprintf("Skipping gallery %s with nil public names", *v.Name))
continue continue
} }
for _, publicName := range v.Properties.SharingProfile.CommunityGalleryInfo.PublicNames { for _, publicName := range v.Properties.SharingProfile.CommunityGalleryInfo.PublicNames {

View file

@ -131,18 +131,18 @@ func (c *Client) DeleteRef(ctx context.Context, ref string) error {
func (c *Client) DeleteVersion(ctx context.Context, ver Version) error { func (c *Client) DeleteVersion(ctx context.Context, ver Version) error {
var retErr error var retErr error
c.Client.Logger.Debugf("Deleting version %s from minor version list", ver.version) c.Client.Logger.Debug(fmt.Sprintf("Deleting version %s from minor version list", ver.version))
possibleNewLatest, err := c.deleteVersionFromMinorVersionList(ctx, ver) possibleNewLatest, err := c.deleteVersionFromMinorVersionList(ctx, ver)
if err != nil { if err != nil {
retErr = errors.Join(retErr, fmt.Errorf("removing from minor version list: %w", err)) retErr = errors.Join(retErr, fmt.Errorf("removing from minor version list: %w", err))
} }
c.Client.Logger.Debugf("Checking latest version for %s", ver.version) c.Client.Logger.Debug(fmt.Sprintf("Checking latest version for %s", ver.version))
if err := c.deleteVersionFromLatest(ctx, ver, possibleNewLatest); err != nil { if err := c.deleteVersionFromLatest(ctx, ver, possibleNewLatest); err != nil {
retErr = errors.Join(retErr, fmt.Errorf("updating latest version: %w", err)) retErr = errors.Join(retErr, fmt.Errorf("updating latest version: %w", err))
} }
c.Client.Logger.Debugf("Deleting artifact path %s for %s", ver.ArtifactPath(APIV1), ver.version) c.Client.Logger.Debug(fmt.Sprintf("Deleting artifact path %s for %s", ver.ArtifactPath(APIV1), ver.version))
if err := c.Client.DeletePath(ctx, ver.ArtifactPath(APIV1)); err != nil { if err := c.Client.DeletePath(ctx, ver.ArtifactPath(APIV1)); err != nil {
retErr = errors.Join(retErr, fmt.Errorf("deleting artifact path: %w", err)) retErr = errors.Join(retErr, fmt.Errorf("deleting artifact path: %w", err))
} }
@ -159,20 +159,20 @@ func (c *Client) deleteVersionFromMinorVersionList(ctx context.Context, ver Vers
Base: ver.WithGranularity(GranularityMinor), Base: ver.WithGranularity(GranularityMinor),
Kind: VersionKindImage, Kind: VersionKindImage,
} }
c.Client.Logger.Debugf("Fetching minor version list for version %s", ver.version) c.Client.Logger.Debug(fmt.Sprintf("Fetching minor version list for version %s", ver.version))
minorList, err := c.FetchVersionList(ctx, minorList) minorList, err := c.FetchVersionList(ctx, minorList)
var notFoundErr *apiclient.NotFoundError var notFoundErr *apiclient.NotFoundError
if errors.As(err, &notFoundErr) { if errors.As(err, &notFoundErr) {
c.Client.Logger.Warnf("Minor version list for version %s not found", ver.version) c.Client.Logger.Warn(fmt.Sprintf("Minor version list for version %s not found", ver.version))
c.Client.Logger.Warnf("Skipping update of minor version list") c.Client.Logger.Warn("Skipping update of minor version list")
return nil, nil return nil, nil
} else if err != nil { } else if err != nil {
return nil, fmt.Errorf("fetching minor version list for version %s: %w", ver.version, err) return nil, fmt.Errorf("fetching minor version list for version %s: %w", ver.version, err)
} }
if !minorList.Contains(ver.version) { if !minorList.Contains(ver.version) {
c.Client.Logger.Warnf("Version %s is not in minor version list %s", ver.version, minorList.JSONPath()) c.Client.Logger.Warn("Version %s is not in minor version list %s", ver.version, minorList.JSONPath())
c.Client.Logger.Warnf("Skipping update of minor version list") c.Client.Logger.Warn("Skipping update of minor version list")
return nil, nil return nil, nil
} }
@ -192,20 +192,20 @@ func (c *Client) deleteVersionFromMinorVersionList(ctx context.Context, ver Vers
Kind: VersionKindImage, Kind: VersionKindImage,
Version: minorList.Versions[len(minorList.Versions)-1], Version: minorList.Versions[len(minorList.Versions)-1],
} }
c.Client.Logger.Debugf("Possible latest version replacement %q", latest.Version) c.Client.Logger.Debug(fmt.Sprintf("Possible latest version replacement %q", latest.Version))
} }
if c.Client.DryRun { if c.Client.DryRun {
c.Client.Logger.Debugf("DryRun: Updating minor version list %s to %v", minorList.JSONPath(), minorList) c.Client.Logger.Debug(fmt.Sprintf("DryRun: Updating minor version list %s to %v", minorList.JSONPath(), minorList))
return latest, nil return latest, nil
} }
c.Client.Logger.Debugf("Updating minor version list %s", minorList.JSONPath()) c.Client.Logger.Debug(fmt.Sprintf("Updating minor version list %s", minorList.JSONPath()))
if err := c.UpdateVersionList(ctx, minorList); err != nil { if err := c.UpdateVersionList(ctx, minorList); err != nil {
return latest, fmt.Errorf("updating minor version list %s: %w", minorList.JSONPath(), err) return latest, fmt.Errorf("updating minor version list %s: %w", minorList.JSONPath(), err)
} }
c.Client.Logger.Debugf("Removed version %s from minor version list %s", ver.version, minorList.JSONPath()) c.Client.Logger.Debug(fmt.Sprintf("Removed version %s from minor version list %s", ver.version, minorList.JSONPath()))
return latest, nil return latest, nil
} }
@ -216,33 +216,33 @@ func (c *Client) deleteVersionFromLatest(ctx context.Context, ver Version, possi
Stream: ver.stream, Stream: ver.stream,
Kind: VersionKindImage, Kind: VersionKindImage,
} }
c.Client.Logger.Debugf("Fetching latest version from %s", latest.JSONPath()) c.Client.Logger.Debug(fmt.Sprintf("Fetching latest version from %s", latest.JSONPath()))
latest, err := c.FetchVersionLatest(ctx, latest) latest, err := c.FetchVersionLatest(ctx, latest)
var notFoundErr *apiclient.NotFoundError var notFoundErr *apiclient.NotFoundError
if errors.As(err, &notFoundErr) { if errors.As(err, &notFoundErr) {
c.Client.Logger.Warnf("Latest version for %s not found", latest.JSONPath()) c.Client.Logger.Warn(fmt.Sprintf("Latest version for %s not found", latest.JSONPath()))
return nil return nil
} else if err != nil { } else if err != nil {
return fmt.Errorf("fetching latest version: %w", err) return fmt.Errorf("fetching latest version: %w", err)
} }
if latest.Version != ver.version { if latest.Version != ver.version {
c.Client.Logger.Debugf("Latest version is %s, not the deleted version %s", latest.Version, ver.version) c.Client.Logger.Debug(fmt.Sprintf("Latest version is %s, not the deleted version %s", latest.Version, ver.version))
return nil return nil
} }
if possibleNewLatest == nil { if possibleNewLatest == nil {
c.Client.Logger.Errorf("Latest version is %s, but no new latest version was found", latest.Version) c.Client.Logger.Error(fmt.Sprintf("Latest version is %s, but no new latest version was found", latest.Version))
c.Client.Logger.Errorf("A manual update of latest at %s might be needed", latest.JSONPath()) c.Client.Logger.Error(fmt.Sprintf("A manual update of latest at %s might be needed", latest.JSONPath()))
return fmt.Errorf("latest version is %s, but no new latest version was found", latest.Version) return fmt.Errorf("latest version is %s, but no new latest version was found", latest.Version)
} }
if c.Client.DryRun { if c.Client.DryRun {
c.Client.Logger.Debugf("Would update latest version from %s to %s", latest.Version, possibleNewLatest.Version) c.Client.Logger.Debug(fmt.Sprintf("Would update latest version from %s to %s", latest.Version, possibleNewLatest.Version))
return nil return nil
} }
c.Client.Logger.Infof("Updating latest version from %s to %s", latest.Version, possibleNewLatest.Version) c.Client.Logger.Info("Updating latest version from %s to %s", latest.Version, possibleNewLatest.Version)
if err := c.UpdateVersionLatest(ctx, *possibleNewLatest); err != nil { if err := c.UpdateVersionLatest(ctx, *possibleNewLatest); err != nil {
return fmt.Errorf("updating latest version: %w", err) return fmt.Errorf("updating latest version: %w", err)
} }
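Note on the pattern above: slog's Warn, Info and Debug treat trailing arguments as alternating key-value pairs, not as printf verbs, which is why the migrated calls render their messages with fmt.Sprintf first. A minimal, self-contained sketch of the difference, using only the standard library (the version string is an illustrative value):

package main

import (
	"fmt"
	"log/slog"
	"os"
)

func main() {
	log := slog.New(slog.NewTextHandler(os.Stdout, nil))
	version := "v2.14.0" // illustrative value

	// Wrong with slog: the %s verb stays unexpanded and the extra argument
	// is emitted as a dangling !BADKEY attribute.
	log.Warn("Version %s is not in the minor version list", version)

	// Correct: render the printf-style message first ...
	log.Warn(fmt.Sprintf("Version %s is not in the minor version list", version))

	// ... or, more idiomatic for slog, pass the value as an attribute.
	log.Warn("version is not in the minor version list", slog.String("version", version))
}

The fmt.Sprintf form keeps the existing messages unchanged; the attribute form is what slog is designed for and is the better target for new code.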

View file

@ -53,10 +53,10 @@ type Logger interface {
type NOPLogger struct{} type NOPLogger struct{}
// Infof is a no-op. // Info is a no-op.
func (NOPLogger) Infof(string, ...interface{}) {} func (NOPLogger) Info(string, ...interface{}) {}
// Warnf is a no-op. // Warn is a no-op.
func (NOPLogger) Warnf(string, ...interface{}) {} func (NOPLogger) Warn(string, ...interface{}) {}
// DeriveClusterID derives the cluster ID from a salt and secret value. // DeriveClusterID derives the cluster ID from a salt and secret value.
func DeriveClusterID(secret, salt []byte) ([]byte, error) { func DeriveClusterID(secret, salt []byte) ([]byte, error) {

View file

@ -212,14 +212,14 @@ func (v *Validator) checkIDKeyDigest(ctx context.Context, report *spb.Attestatio
// the MAA if necessary. // the MAA if necessary.
switch v.config.FirmwareSignerConfig.EnforcementPolicy { switch v.config.FirmwareSignerConfig.EnforcementPolicy {
case idkeydigest.MAAFallback: case idkeydigest.MAAFallback:
v.log.Infof( v.log.Info(
"Configured idkeydigests %x don't contain reported idkeydigest %x, falling back to MAA validation", "Configured idkeydigests %x don't contain reported idkeydigest %x, falling back to MAA validation",
v.config.FirmwareSignerConfig.AcceptedKeyDigests, v.config.FirmwareSignerConfig.AcceptedKeyDigests,
report.Report.IdKeyDigest, report.Report.IdKeyDigest,
) )
return v.maa.validateToken(ctx, v.config.FirmwareSignerConfig.MAAURL, maaToken, extraData) return v.maa.validateToken(ctx, v.config.FirmwareSignerConfig.MAAURL, maaToken, extraData)
case idkeydigest.WarnOnly: case idkeydigest.WarnOnly:
v.log.Warnf( v.log.Warn(
"Configured idkeydigests %x don't contain reported idkeydigest %x", "Configured idkeydigests %x don't contain reported idkeydigest %x",
v.config.FirmwareSignerConfig.AcceptedKeyDigests, v.config.FirmwareSignerConfig.AcceptedKeyDigests,
report.Report.IdKeyDigest, report.Report.IdKeyDigest,
@ -240,10 +240,10 @@ func (v *Validator) checkIDKeyDigest(ctx context.Context, report *spb.Attestatio
type nopAttestationLogger struct{} type nopAttestationLogger struct{}
// Infof is a no-op. // Info is a no-op.
func (nopAttestationLogger) Infof(string, ...interface{}) {} func (nopAttestationLogger) Info(string, ...interface{}) {}
// Warnf is a no-op. // Warn is a no-op.
func (nopAttestationLogger) Warnf(string, ...interface{}) {} func (nopAttestationLogger) Warn(string, ...interface{}) {}
type maaValidator interface { type maaValidator interface {
validateToken(ctx context.Context, maaURL string, token string, extraData []byte) error validateToken(ctx context.Context, maaURL string, token string, extraData []byte) error
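The attestation logger interface above keeps its printf-style shape (a format string plus variadic args) even after the rename to Info/Warn, while the concrete logger is now a *slog.Logger. One way to reconcile the two without touching every call site is a thin adapter; this is a hypothetical sketch, not code from the repository:

package logadapter // hypothetical package, for illustration only

import (
	"fmt"
	"log/slog"
)

// SlogAdapter renders printf-style Info/Warn calls before forwarding them to
// a *slog.Logger, so callers that still pass format strings keep working.
type SlogAdapter struct {
	Log *slog.Logger
}

func (a SlogAdapter) Info(format string, args ...any) {
	a.Log.Info(fmt.Sprintf(format, args...))
}

func (a SlogAdapter) Warn(format string, args ...any) {
	a.Log.Warn(fmt.Sprintf(format, args...))
}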

View file

@ -17,6 +17,7 @@ import (
"os" "os"
"regexp" "regexp"
"testing" "testing"
"log/slog"
"github.com/edgelesssys/constellation/v2/internal/attestation" "github.com/edgelesssys/constellation/v2/internal/attestation"
"github.com/edgelesssys/constellation/v2/internal/attestation/idkeydigest" "github.com/edgelesssys/constellation/v2/internal/attestation/idkeydigest"

View file

@ -57,7 +57,7 @@ func (a *InstanceInfo) addReportSigner(att *spb.Attestation, report *spb.Report,
// If the VCEK certificate is present, parse it and format it. // If the VCEK certificate is present, parse it and format it.
reportSigner, err := a.ParseReportSigner() reportSigner, err := a.ParseReportSigner()
if err != nil { if err != nil {
logger.Warnf("Error parsing report signer: %v", err) logger.Warn("Error parsing report signer: %v", err)
} }
signerInfo, err := abi.ParseSignerInfo(report.GetSignerInfo()) signerInfo, err := abi.ParseSignerInfo(report.GetSignerInfo())
@ -77,7 +77,7 @@ func (a *InstanceInfo) addReportSigner(att *spb.Attestation, report *spb.Report,
// If no VCEK is present, fetch it from AMD. // If no VCEK is present, fetch it from AMD.
if reportSigner == nil { if reportSigner == nil {
logger.Infof("VCEK certificate not present, falling back to retrieving it from AMD KDS") logger.Info("VCEK certificate not present, falling back to retrieving it from AMD KDS")
vcekURL := kds.VCEKCertURL(productName, report.GetChipId(), kds.TCBVersion(report.GetReportedTcb())) vcekURL := kds.VCEKCertURL(productName, report.GetChipId(), kds.TCBVersion(report.GetReportedTcb()))
vcekData, err = getter.Get(vcekURL) vcekData, err = getter.Get(vcekURL)
if err != nil { if err != nil {
@ -123,29 +123,29 @@ func (a *InstanceInfo) AttestationWithCerts(getter trust.HTTPSGetter,
// If the certificate chain from THIM is present, parse it and format it. // If the certificate chain from THIM is present, parse it and format it.
ask, ark, err := a.ParseCertChain() ask, ark, err := a.ParseCertChain()
if err != nil { if err != nil {
logger.Warnf("Error parsing certificate chain: %v", err) logger.Warn("Error parsing certificate chain: %v", err)
} }
if ask != nil { if ask != nil {
logger.Infof("Using ASK certificate from Azure THIM") logger.Info("Using ASK certificate from Azure THIM")
att.CertificateChain.AskCert = ask.Raw att.CertificateChain.AskCert = ask.Raw
} }
if ark != nil { if ark != nil {
logger.Infof("Using ARK certificate from Azure THIM") logger.Info("Using ARK certificate from Azure THIM")
att.CertificateChain.ArkCert = ark.Raw att.CertificateChain.ArkCert = ark.Raw
} }
// If a cached ASK or an ARK from the Constellation config is present, use it. // If a cached ASK or an ARK from the Constellation config is present, use it.
if att.CertificateChain.AskCert == nil && fallbackCerts.ask != nil { if att.CertificateChain.AskCert == nil && fallbackCerts.ask != nil {
logger.Infof("Using cached ASK certificate") logger.Info("Using cached ASK certificate")
att.CertificateChain.AskCert = fallbackCerts.ask.Raw att.CertificateChain.AskCert = fallbackCerts.ask.Raw
} }
if att.CertificateChain.ArkCert == nil && fallbackCerts.ark != nil { if att.CertificateChain.ArkCert == nil && fallbackCerts.ark != nil {
logger.Infof("Using ARK certificate from %s", constants.ConfigFilename) logger.Info("Using ARK certificate from %s", constants.ConfigFilename)
att.CertificateChain.ArkCert = fallbackCerts.ark.Raw att.CertificateChain.ArkCert = fallbackCerts.ark.Raw
} }
// Otherwise, retrieve it from AMD KDS. // Otherwise, retrieve it from AMD KDS.
if att.CertificateChain.AskCert == nil || att.CertificateChain.ArkCert == nil { if att.CertificateChain.AskCert == nil || att.CertificateChain.ArkCert == nil {
logger.Infof( logger.Info(
"Certificate chain not fully present (ARK present: %t, ASK present: %t), falling back to retrieving it from AMD KDS", "Certificate chain not fully present (ARK present: %t, ASK present: %t), falling back to retrieving it from AMD KDS",
(att.CertificateChain.ArkCert != nil), (att.CertificateChain.ArkCert != nil),
(att.CertificateChain.AskCert != nil), (att.CertificateChain.AskCert != nil),
@ -155,11 +155,11 @@ func (a *InstanceInfo) AttestationWithCerts(getter trust.HTTPSGetter,
return nil, fmt.Errorf("retrieving certificate chain from AMD KDS: %w", err) return nil, fmt.Errorf("retrieving certificate chain from AMD KDS: %w", err)
} }
if att.CertificateChain.AskCert == nil && kdsCertChain.Ask != nil { if att.CertificateChain.AskCert == nil && kdsCertChain.Ask != nil {
logger.Infof("Using ASK certificate from AMD KDS") logger.Info("Using ASK certificate from AMD KDS")
att.CertificateChain.AskCert = kdsCertChain.Ask.Raw att.CertificateChain.AskCert = kdsCertChain.Ask.Raw
} }
if att.CertificateChain.ArkCert == nil && kdsCertChain.Ask != nil { if att.CertificateChain.ArkCert == nil && kdsCertChain.Ask != nil {
logger.Infof("Using ARK certificate from AMD KDS") logger.Info("Using ARK certificate from AMD KDS")
att.CertificateChain.ArkCert = kdsCertChain.Ark.Raw att.CertificateChain.ArkCert = kdsCertChain.Ark.Raw
} }
} }

View file

@ -13,6 +13,7 @@ import (
"regexp" "regexp"
"strings" "strings"
"testing" "testing"
"log/slog"
"github.com/edgelesssys/constellation/v2/internal/attestation/snp/testdata" "github.com/edgelesssys/constellation/v2/internal/attestation/snp/testdata"
"github.com/edgelesssys/constellation/v2/internal/config" "github.com/edgelesssys/constellation/v2/internal/config"

View file

@ -37,10 +37,10 @@ func NewIssuer(log attestation.Logger) *Issuer {
// Issue issues a TDX attestation document. // Issue issues a TDX attestation document.
func (i *Issuer) Issue(_ context.Context, userData []byte, nonce []byte) (attDoc []byte, err error) { func (i *Issuer) Issue(_ context.Context, userData []byte, nonce []byte) (attDoc []byte, err error) {
i.log.Infof("Issuing attestation statement") i.log.Info("Issuing attestation statement")
defer func() { defer func() {
if err != nil { if err != nil {
i.log.Warnf("Failed to issue attestation document: %s", err) i.log.Warn("Failed to issue attestation document: %s", err)
} }
}() }()

View file

@ -49,10 +49,10 @@ func NewValidator(cfg *config.QEMUTDX, log attestation.Logger) *Validator {
// Validate validates the given attestation document using TDX attestation. // Validate validates the given attestation document using TDX attestation.
func (v *Validator) Validate(ctx context.Context, attDocRaw []byte, nonce []byte) (userData []byte, err error) { func (v *Validator) Validate(ctx context.Context, attDocRaw []byte, nonce []byte) (userData []byte, err error) {
v.log.Infof("Validating attestation document") v.log.Info("Validating attestation document")
defer func() { defer func() {
if err != nil { if err != nil {
v.log.Warnf("Failed to validate attestation document: %s", err) v.log.Warn("Failed to validate attestation document: %s", err)
} }
}() }()
@ -83,7 +83,7 @@ func (v *Validator) Validate(ctx context.Context, attDocRaw []byte, nonce []byte
// Verify the quote against the expected measurements. // Verify the quote against the expected measurements.
warnings, errs := v.expected.Compare(tdMeasure) warnings, errs := v.expected.Compare(tdMeasure)
for _, warning := range warnings { for _, warning := range warnings {
v.log.Warnf(warning) v.log.Warn(warning)
} }
if len(errs) > 0 { if len(errs) > 0 {
return nil, fmt.Errorf("measurement validation failed:\n%w", errors.Join(errs...)) return nil, fmt.Errorf("measurement validation failed:\n%w", errors.Join(errs...))

View file

@ -103,10 +103,10 @@ func NewIssuer(
// Issue generates an attestation document using a TPM. // Issue generates an attestation document using a TPM.
func (i *Issuer) Issue(ctx context.Context, userData []byte, nonce []byte) (res []byte, err error) { func (i *Issuer) Issue(ctx context.Context, userData []byte, nonce []byte) (res []byte, err error) {
i.log.Infof("Issuing attestation statement") i.log.Info("Issuing attestation statement")
defer func() { defer func() {
if err != nil { if err != nil {
i.log.Warnf("Failed to issue attestation statement: %s", err) i.log.Warn("Failed to issue attestation statement: %s", err)
} }
}() }()
@ -147,7 +147,7 @@ func (i *Issuer) Issue(ctx context.Context, userData []byte, nonce []byte) (res
return nil, fmt.Errorf("marshaling attestation document: %w", err) return nil, fmt.Errorf("marshaling attestation document: %w", err)
} }
i.log.Infof("Successfully issued attestation statement") i.log.Info("Successfully issued attestation statement")
return rawAttDoc, nil return rawAttDoc, nil
} }
@ -177,10 +177,10 @@ func NewValidator(expected measurements.M, getTrustedKey GetTPMTrustedAttestatio
// Validate a TPM based attestation. // Validate a TPM based attestation.
func (v *Validator) Validate(ctx context.Context, attDocRaw []byte, nonce []byte) (userData []byte, err error) { func (v *Validator) Validate(ctx context.Context, attDocRaw []byte, nonce []byte) (userData []byte, err error) {
v.log.Infof("Validating attestation document") v.log.Info("Validating attestation document")
defer func() { defer func() {
if err != nil { if err != nil {
v.log.Warnf("Failed to validate attestation document: %s", err) v.log.Warn("Failed to validate attestation document: %s", err)
} }
}() }()
@ -233,13 +233,13 @@ func (v *Validator) Validate(ctx context.Context, attDocRaw []byte, nonce []byte
} }
warnings, errs := v.expected.Compare(attDoc.Attestation.Quotes[quoteIdx].Pcrs.Pcrs) warnings, errs := v.expected.Compare(attDoc.Attestation.Quotes[quoteIdx].Pcrs.Pcrs)
for _, warning := range warnings { for _, warning := range warnings {
v.log.Warnf(warning) v.log.Warn(warning)
} }
if len(errs) > 0 { if len(errs) > 0 {
return nil, fmt.Errorf("measurement validation failed:\n%w", errors.Join(errs...)) return nil, fmt.Errorf("measurement validation failed:\n%w", errors.Join(errs...))
} }
v.log.Infof("Successfully validated attestation document") v.log.Info("Successfully validated attestation document")
return attDoc.UserData, nil return attDoc.UserData, nil
} }

View file

@ -15,6 +15,7 @@ import (
"io" "io"
"os" "os"
"testing" "testing"
"log/slog"
tpmclient "github.com/google/go-tpm-tools/client" tpmclient "github.com/google/go-tpm-tools/client"
"github.com/google/go-tpm-tools/proto/attest" "github.com/google/go-tpm-tools/proto/attest"
@ -481,10 +482,10 @@ type testAttestationLogger struct {
warnings []string warnings []string
} }
func (w *testAttestationLogger) Infof(format string, args ...any) { func (w *testAttestationLogger) Info(format string, args ...any) {
w.infos = append(w.infos, fmt.Sprintf(format, args...)) w.infos = append(w.infos, fmt.Sprintf(format, args...))
} }
func (w *testAttestationLogger) Warnf(format string, args ...any) { func (w *testAttestationLogger) Warn(format string, args ...any) {
w.warnings = append(w.warnings, fmt.Sprintf(format, args...)) w.warnings = append(w.warnings, fmt.Sprintf(format, args...))
} }
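The testAttestationLogger above collects rendered messages so tests can assert on them. An equivalent approach with slog is a small recording handler; a sketch under the assumption that the tests only need the message text:

package testlog // hypothetical helper, for illustration only

import (
	"context"
	"log/slog"
)

// RecordingHandler stores every log message so tests can assert on them.
type RecordingHandler struct {
	Messages *[]string
}

func (h RecordingHandler) Enabled(context.Context, slog.Level) bool { return true }

func (h RecordingHandler) Handle(_ context.Context, r slog.Record) error {
	*h.Messages = append(*h.Messages, r.Message)
	return nil
}

func (h RecordingHandler) WithAttrs([]slog.Attr) slog.Handler { return h }
func (h RecordingHandler) WithGroup(string) slog.Handler      { return h }

A test would pass slog.New(RecordingHandler{Messages: &msgs}) wherever a *slog.Logger is expected and assert on msgs afterwards.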

View file

@ -87,7 +87,7 @@ func (a *Applier) SetKubeConfig(kubeConfig []byte) error {
// CheckLicense checks the given Constellation license with the license server // CheckLicense checks the given Constellation license with the license server
// and returns the allowed quota for the license. // and returns the allowed quota for the license.
func (a *Applier) CheckLicense(ctx context.Context, csp cloudprovider.Provider, initRequest bool, licenseID string) (int, error) { func (a *Applier) CheckLicense(ctx context.Context, csp cloudprovider.Provider, initRequest bool, licenseID string) (int, error) {
a.log.Debugf("Contacting license server for license '%s'", licenseID) a.log.Debug("Contacting license server for license '%s'", licenseID)
var action license.Action var action license.Action
if initRequest { if initRequest {
@ -103,14 +103,14 @@ func (a *Applier) CheckLicense(ctx context.Context, csp cloudprovider.Provider,
if err != nil { if err != nil {
return 0, fmt.Errorf("checking license: %w", err) return 0, fmt.Errorf("checking license: %w", err)
} }
a.log.Debugf("Got response from license server for license '%s'", licenseID) a.log.Debug("Got response from license server for license '%s'", licenseID)
return quota, nil return quota, nil
} }
// GenerateMasterSecret generates a new master secret. // GenerateMasterSecret generates a new master secret.
func (a *Applier) GenerateMasterSecret() (uri.MasterSecret, error) { func (a *Applier) GenerateMasterSecret() (uri.MasterSecret, error) {
a.log.Debugf("Generating master secret") a.log.Debug("Generating master secret")
key, err := crypto.GenerateRandomBytes(crypto.MasterSecretLengthDefault) key, err := crypto.GenerateRandomBytes(crypto.MasterSecretLengthDefault)
if err != nil { if err != nil {
return uri.MasterSecret{}, err return uri.MasterSecret{}, err
@ -123,17 +123,17 @@ func (a *Applier) GenerateMasterSecret() (uri.MasterSecret, error) {
Key: key, Key: key,
Salt: salt, Salt: salt,
} }
a.log.Debugf("Generated master secret key and salt values") a.log.Debug("Generated master secret key and salt values")
return secret, nil return secret, nil
} }
// GenerateMeasurementSalt generates a new measurement salt. // GenerateMeasurementSalt generates a new measurement salt.
func (a *Applier) GenerateMeasurementSalt() ([]byte, error) { func (a *Applier) GenerateMeasurementSalt() ([]byte, error) {
a.log.Debugf("Generating measurement salt") a.log.Debug("Generating measurement salt")
measurementSalt, err := crypto.GenerateRandomBytes(crypto.RNGLengthDefault) measurementSalt, err := crypto.GenerateRandomBytes(crypto.RNGLengthDefault)
if err != nil { if err != nil {
return nil, fmt.Errorf("generating measurement salt: %w", err) return nil, fmt.Errorf("generating measurement salt: %w", err)
} }
a.log.Debugf("Generated measurement salt") a.log.Debug("Generated measurement salt")
return measurementSalt, nil return measurementSalt, nil
} }
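GenerateMasterSecret and GenerateMeasurementSalt delegate the randomness to crypto.GenerateRandomBytes; the sketch below shows what such a helper is assumed to do (read from crypto/rand), with illustrative lengths rather than the package's constants:

package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
)

// generateRandomBytes returns n cryptographically secure random bytes.
func generateRandomBytes(n int) ([]byte, error) {
	b := make([]byte, n)
	if _, err := rand.Read(b); err != nil {
		return nil, err
	}
	return b, nil
}

func main() {
	key, err := generateRandomBytes(32) // illustrative master secret length
	if err != nil {
		panic(err)
	}
	salt, err := generateRandomBytes(16) // illustrative salt length
	if err != nil {
		panic(err)
	}
	fmt.Println(hex.EncodeToString(key), hex.EncodeToString(salt))
}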

View file

@ -9,6 +9,7 @@ package constellation
import ( import (
"context" "context"
"testing" "testing"
"log/slog"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider" "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
"github.com/edgelesssys/constellation/v2/internal/crypto" "github.com/edgelesssys/constellation/v2/internal/crypto"

View file

@ -85,21 +85,21 @@ func (a *Applier) Init(
// Create a wrapper function that allows logging any returned error from the retrier before checking if it's the expected retriable one. // Create a wrapper function that allows logging any returned error from the retrier before checking if it's the expected retriable one.
serviceIsUnavailable := func(err error) bool { serviceIsUnavailable := func(err error) bool {
isServiceUnavailable := grpcRetry.ServiceIsUnavailable(err) isServiceUnavailable := grpcRetry.ServiceIsUnavailable(err)
a.log.Debugf("Encountered error (retriable: %t): %s", isServiceUnavailable, err) a.log.Debug("Encountered error (retriable: %t): %s", isServiceUnavailable, err)
return isServiceUnavailable return isServiceUnavailable
} }
// Perform the RPC // Perform the RPC
a.log.Debugf("Making initialization call, doer is %+v", doer) a.log.Debug("Making initialization call, doer is %+v", doer)
a.spinner.Start("Connecting ", false) a.spinner.Start("Connecting ", false)
retrier := retry.NewIntervalRetrier(doer, 30*time.Second, serviceIsUnavailable) retrier := retry.NewIntervalRetrier(doer, 30*time.Second, serviceIsUnavailable)
if err := retrier.Do(ctx); err != nil { if err := retrier.Do(ctx); err != nil {
return InitOutput{}, fmt.Errorf("doing init call: %w", err) return InitOutput{}, fmt.Errorf("doing init call: %w", err)
} }
a.spinner.Stop() a.spinner.Stop()
a.log.Debugf("Initialization request finished") a.log.Debug("Initialization request finished")
a.log.Debugf("Rewriting cluster server address in kubeconfig to %s", state.Infrastructure.ClusterEndpoint) a.log.Debug("Rewriting cluster server address in kubeconfig to %s", state.Infrastructure.ClusterEndpoint)
kubeconfig, err := clientcmd.Load(doer.resp.Kubeconfig) kubeconfig, err := clientcmd.Load(doer.resp.Kubeconfig)
if err != nil { if err != nil {
return InitOutput{}, fmt.Errorf("loading kubeconfig: %w", err) return InitOutput{}, fmt.Errorf("loading kubeconfig: %w", err)
@ -175,7 +175,7 @@ func (d *initDoer) Do(ctx context.Context) error {
conn, err := d.dialer.Dial(ctx, d.endpoint) conn, err := d.dialer.Dial(ctx, d.endpoint)
if err != nil { if err != nil {
d.log.Debugf("Dialing init server failed: %s. Retrying...", err) d.log.Debug("Dialing init server failed: %s. Retrying...", err)
return fmt.Errorf("dialing init server: %w", err) return fmt.Errorf("dialing init server: %w", err)
} }
defer conn.Close() defer conn.Close()
@ -188,7 +188,7 @@ func (d *initDoer) Do(ctx context.Context) error {
d.handleGRPCStateChanges(grpcStateLogCtx, &wg, conn) d.handleGRPCStateChanges(grpcStateLogCtx, &wg, conn)
protoClient := initproto.NewAPIClient(conn) protoClient := initproto.NewAPIClient(conn)
d.log.Debugf("Created protoClient") d.log.Debug("Created protoClient")
resp, err := protoClient.Init(ctx, d.req) resp, err := protoClient.Init(ctx, d.req)
if err != nil { if err != nil {
return &NonRetriableInitError{ return &NonRetriableInitError{
@ -200,7 +200,7 @@ func (d *initDoer) Do(ctx context.Context) error {
res, err := resp.Recv() // get first response, either success or failure res, err := resp.Recv() // get first response, either success or failure
if err != nil { if err != nil {
if e := d.getLogs(resp); e != nil { if e := d.getLogs(resp); e != nil {
d.log.Debugf("Failed to collect logs: %s", e) d.log.Debug("Failed to collect logs: %s", e)
return &NonRetriableInitError{ return &NonRetriableInitError{
LogCollectionErr: e, LogCollectionErr: e,
Err: err, Err: err,
@ -214,7 +214,7 @@ func (d *initDoer) Do(ctx context.Context) error {
d.resp = res.GetInitSuccess() d.resp = res.GetInitSuccess()
case *initproto.InitResponse_InitFailure: case *initproto.InitResponse_InitFailure:
if e := d.getLogs(resp); e != nil { if e := d.getLogs(resp); e != nil {
d.log.Debugf("Failed to get logs from cluster: %s", e) d.log.Debug("Failed to get logs from cluster: %s", e)
return &NonRetriableInitError{ return &NonRetriableInitError{
LogCollectionErr: e, LogCollectionErr: e,
Err: errors.New(res.GetInitFailure().GetError()), Err: errors.New(res.GetInitFailure().GetError()),
@ -222,10 +222,10 @@ func (d *initDoer) Do(ctx context.Context) error {
} }
return &NonRetriableInitError{Err: errors.New(res.GetInitFailure().GetError())} return &NonRetriableInitError{Err: errors.New(res.GetInitFailure().GetError())}
case nil: case nil:
d.log.Debugf("Cluster returned nil response type") d.log.Debug("Cluster returned nil response type")
err = errors.New("empty response from cluster") err = errors.New("empty response from cluster")
if e := d.getLogs(resp); e != nil { if e := d.getLogs(resp); e != nil {
d.log.Debugf("Failed to collect logs: %s", e) d.log.Debug("Failed to collect logs: %s", e)
return &NonRetriableInitError{ return &NonRetriableInitError{
LogCollectionErr: e, LogCollectionErr: e,
Err: err, Err: err,
@ -233,10 +233,10 @@ func (d *initDoer) Do(ctx context.Context) error {
} }
return &NonRetriableInitError{Err: err} return &NonRetriableInitError{Err: err}
default: default:
d.log.Debugf("Cluster returned unknown response type") d.log.Debug("Cluster returned unknown response type")
err = errors.New("unknown response from cluster") err = errors.New("unknown response from cluster")
if e := d.getLogs(resp); e != nil { if e := d.getLogs(resp); e != nil {
d.log.Debugf("Failed to collect logs: %s", e) d.log.Debug("Failed to collect logs: %s", e)
return &NonRetriableInitError{ return &NonRetriableInitError{
LogCollectionErr: e, LogCollectionErr: e,
Err: err, Err: err,
@ -249,7 +249,7 @@ func (d *initDoer) Do(ctx context.Context) error {
// getLogs retrieves the cluster logs from the bootstrapper and saves them in the initDoer. // getLogs retrieves the cluster logs from the bootstrapper and saves them in the initDoer.
func (d *initDoer) getLogs(resp initproto.API_InitClient) error { func (d *initDoer) getLogs(resp initproto.API_InitClient) error {
d.log.Debugf("Attempting to collect cluster logs") d.log.Debug("Attempting to collect cluster logs")
for { for {
res, err := resp.Recv() res, err := resp.Recv()
if err == io.EOF { if err == io.EOF {
@ -277,7 +277,7 @@ func (d *initDoer) getLogs(resp initproto.API_InitClient) error {
} }
} }
d.log.Debugf("Received cluster logs") d.log.Debug("Received cluster logs")
return nil return nil
} }

View file

@ -16,6 +16,7 @@ import (
"strconv" "strconv"
"testing" "testing"
"time" "time"
"log/slog"
"github.com/edgelesssys/constellation/v2/bootstrapper/initproto" "github.com/edgelesssys/constellation/v2/bootstrapper/initproto"
"github.com/edgelesssys/constellation/v2/internal/atls" "github.com/edgelesssys/constellation/v2/internal/atls"

View file

@ -26,7 +26,7 @@ type crdLister interface {
// BackupCRDs backs up all CRDs to the upgrade workspace. // BackupCRDs backs up all CRDs to the upgrade workspace.
func (k *KubeCmd) BackupCRDs(ctx context.Context, fileHandler file.Handler, upgradeDir string) ([]apiextensionsv1.CustomResourceDefinition, error) { func (k *KubeCmd) BackupCRDs(ctx context.Context, fileHandler file.Handler, upgradeDir string) ([]apiextensionsv1.CustomResourceDefinition, error) {
k.log.Debugf("Starting CRD backup") k.log.Debug("Starting CRD backup")
crds, err := k.kubectl.ListCRDs(ctx) crds, err := k.kubectl.ListCRDs(ctx)
if err != nil { if err != nil {
return nil, fmt.Errorf("getting CRDs: %w", err) return nil, fmt.Errorf("getting CRDs: %w", err)
@ -39,7 +39,7 @@ func (k *KubeCmd) BackupCRDs(ctx context.Context, fileHandler file.Handler, upgr
for i := range crds { for i := range crds {
path := filepath.Join(crdBackupFolder, crds[i].Name+".yaml") path := filepath.Join(crdBackupFolder, crds[i].Name+".yaml")
k.log.Debugf("Creating CRD backup: %s", path) k.log.Debug("Creating CRD backup: %s", path)
// We have to manually set kind/apiversion because of a long-standing limitation of the API: // We have to manually set kind/apiversion because of a long-standing limitation of the API:
// https://github.com/kubernetes/kubernetes/issues/3030#issuecomment-67543738 // https://github.com/kubernetes/kubernetes/issues/3030#issuecomment-67543738
@ -56,15 +56,15 @@ func (k *KubeCmd) BackupCRDs(ctx context.Context, fileHandler file.Handler, upgr
return nil, err return nil, err
} }
} }
k.log.Debugf("CRD backup complete") k.log.Debug("CRD backup complete")
return crds, nil return crds, nil
} }
// BackupCRs backs up all CRs to the upgrade workspace. // BackupCRs backs up all CRs to the upgrade workspace.
func (k *KubeCmd) BackupCRs(ctx context.Context, fileHandler file.Handler, crds []apiextensionsv1.CustomResourceDefinition, upgradeDir string) error { func (k *KubeCmd) BackupCRs(ctx context.Context, fileHandler file.Handler, crds []apiextensionsv1.CustomResourceDefinition, upgradeDir string) error {
k.log.Debugf("Starting CR backup") k.log.Debug("Starting CR backup")
for _, crd := range crds { for _, crd := range crds {
k.log.Debugf("Creating backup for resource type: %s", crd.Name) k.log.Debug("Creating backup for resource type: %s", crd.Name)
// Iterate over all versions of the CRD // Iterate over all versions of the CRD
// TODO(daniel-weisse): Consider iterating over crd.Status.StoredVersions instead // TODO(daniel-weisse): Consider iterating over crd.Status.StoredVersions instead
@ -72,7 +72,7 @@ func (k *KubeCmd) BackupCRs(ctx context.Context, fileHandler file.Handler, crds
// a version that is not installed in the cluster. // a version that is not installed in the cluster.
// With the StoredVersions field, we could only iterate over the installed versions. // With the StoredVersions field, we could only iterate over the installed versions.
for _, version := range crd.Spec.Versions { for _, version := range crd.Spec.Versions {
k.log.Debugf("Creating backup of CRs for %q at version %q", crd.Name, version.Name) k.log.Debug("Creating backup of CRs for %q at version %q", crd.Name, version.Name)
gvr := schema.GroupVersionResource{Group: crd.Spec.Group, Version: version.Name, Resource: crd.Spec.Names.Plural} gvr := schema.GroupVersionResource{Group: crd.Spec.Group, Version: version.Name, Resource: crd.Spec.Names.Plural}
crs, err := k.kubectl.ListCRs(ctx, gvr) crs, err := k.kubectl.ListCRs(ctx, gvr)
@ -80,7 +80,7 @@ func (k *KubeCmd) BackupCRs(ctx context.Context, fileHandler file.Handler, crds
if !k8serrors.IsNotFound(err) { if !k8serrors.IsNotFound(err) {
return fmt.Errorf("retrieving CR %s: %w", crd.Name, err) return fmt.Errorf("retrieving CR %s: %w", crd.Name, err)
} }
k.log.Debugf("No CRs found for %q at version %q, skipping...", crd.Name, version.Name) k.log.Debug("No CRs found for %q at version %q, skipping...", crd.Name, version.Name)
continue continue
} }
@ -101,9 +101,9 @@ func (k *KubeCmd) BackupCRs(ctx context.Context, fileHandler file.Handler, crds
} }
} }
k.log.Debugf("Backup for resource type %q complete", crd.Name) k.log.Debug("Backup for resource type %q complete", crd.Name)
} }
k.log.Debugf("CR backup complete") k.log.Debug("CR backup complete")
return nil return nil
} }
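The backup loop above writes each CRD and CR to the upgrade workspace and has to set Kind and APIVersion by hand because the typed client strips TypeMeta (kubernetes/kubernetes#3030). A hedged sketch of that step, with an illustrative path layout and no claim to match the repository's helpers:

package backup // illustrative only

import (
	"os"
	"path/filepath"

	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	"sigs.k8s.io/yaml"
)

// writeCRDBackup marshals a CRD to YAML after restoring its TypeMeta and
// writes it to <backupDir>/<name>.yaml.
func writeCRDBackup(crd apiextensionsv1.CustomResourceDefinition, backupDir string) error {
	crd.Kind = "CustomResourceDefinition"
	crd.APIVersion = "apiextensions.k8s.io/v1"

	data, err := yaml.Marshal(crd)
	if err != nil {
		return err
	}
	return os.WriteFile(filepath.Join(backupDir, crd.Name+".yaml"), data, 0o644)
}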

View file

@ -166,8 +166,7 @@ func TestBackupCRs(t *testing.T) {
type stubLog struct{} type stubLog struct{}
func (s stubLog) Debugf(_ string, _ ...any) {} func (s stubLog) Debug(_ string, _ ...any) {}
func (s stubLog) Sync() {}
func (c stubKubectl) ListCRDs(_ context.Context) ([]apiextensionsv1.CustomResourceDefinition, error) { func (c stubKubectl) ListCRDs(_ context.Context) ([]apiextensionsv1.CustomResourceDefinition, error) {
if c.getCRDsError != nil { if c.getCRDsError != nil {

View file

@ -13,6 +13,7 @@ import (
"fmt" "fmt"
"testing" "testing"
"time" "time"
"log/slog"
"github.com/edgelesssys/constellation/v2/internal/attestation/measurements" "github.com/edgelesssys/constellation/v2/internal/attestation/measurements"
"github.com/edgelesssys/constellation/v2/internal/compatibility" "github.com/edgelesssys/constellation/v2/internal/compatibility"

View file

@ -30,15 +30,15 @@ func LogStateChangesUntilReady(ctx context.Context, conn getStater, log debugLog
go func() { go func() {
defer wg.Done() defer wg.Done()
state := conn.GetState() state := conn.GetState()
log.Debugf("Connection state started as %s", state) log.Debug("Connection state started as %s", state)
for ; state != connectivity.Ready && conn.WaitForStateChange(ctx, state); state = conn.GetState() { for ; state != connectivity.Ready && conn.WaitForStateChange(ctx, state); state = conn.GetState() {
log.Debugf("Connection state changed to %s", state) log.Debug("Connection state changed to %s", state)
} }
if state == connectivity.Ready { if state == connectivity.Ready {
log.Debugf("Connection ready") log.Debug("Connection ready")
isReadyCallback() isReadyCallback()
} else { } else {
log.Debugf("Connection state ended with %s", state) log.Debug("Connection state ended with %s", state)
} }
}() }()
} }
@ -49,5 +49,5 @@ type getStater interface {
} }
type debugLog interface { type debugLog interface {
Debugf(format string, args ...any) Debug(format string, args ...any)
} }

View file

@ -88,7 +88,7 @@ type spyLog struct {
msgs []string msgs []string
} }
func (f *spyLog) Debugf(format string, args ...any) { func (f *spyLog) Debug(format string, args ...any) {
f.msgs = append(f.msgs, fmt.Sprintf(format, args...)) f.msgs = append(f.msgs, fmt.Sprintf(format, args...))
} }

View file

@ -158,6 +158,7 @@ type TestWriter struct {
} }
func (t TestWriter) Write(p []byte) (int, error) { func (t TestWriter) Write(p []byte) (int, error) {
t.T.Helper()
t.T.Log(p) t.T.Log(string(p))
return len(p), nil return len(p), nil
} }
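TestWriter turns a testing.T into an io.Writer so that service log output lands in the test log (the added t.T.Helper() keeps the reported caller useful). With slog, such a writer plugs straight into a handler; a minimal sketch with hypothetical names:

package logtest // hypothetical, for illustration only

import (
	"log/slog"
	"testing"
)

// testWriter forwards each formatted log line to the test's own log output.
type testWriter struct{ t *testing.T }

func (w testWriter) Write(p []byte) (int, error) {
	w.t.Helper()
	w.t.Log(string(p))
	return len(p), nil
}

func TestUnitWithLogging(t *testing.T) {
	log := slog.New(slog.NewTextHandler(testWriter{t: t}, nil))
	log.Info("hello from the unit under test")
}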

View file

@ -8,6 +8,7 @@ SPDX-License-Identifier: AGPL-3.0-only
package archive package archive
import ( import (
"fmt"
"context" "context"
"io" "io"
"log/slog" "log/slog"
@ -73,7 +74,7 @@ func (a *Archivist) Archive(ctx context.Context, version versionsapi.Version, cs
if err != nil { if err != nil {
return "", err return "", err
} }
a.log.Debug("Archiving OS image %s %s %v to s3://%v/%v", csp, attestationVariant, version.ShortPath(), a.bucket, key) a.log.Debug(fmt.Sprintf("Archiving OS image %s %s %v to s3://%v/%v", csp, attestationVariant, version.ShortPath(), a.bucket, key))
_, err = a.uploadClient.Upload(ctx, &s3.PutObjectInput{ _, err = a.uploadClient.Upload(ctx, &s3.PutObjectInput{
Bucket: &a.bucket, Bucket: &a.bucket,
Key: &key, Key: &key,

View file

@ -103,7 +103,7 @@ func (u *Uploader) Upload(ctx context.Context, req *osimage.UploadRequest) ([]ve
} }
defer func() { defer func() {
if err := u.ensureBlobDeleted(ctx, blobName); err != nil { if err := u.ensureBlobDeleted(ctx, blobName); err != nil {
u.log.Errorf("post-cleaning: deleting temporary blob from s3", err) u.log.Error("post-cleaning: deleting temporary blob from s3", err)
} }
}() }()
snapshotID, err := u.importSnapshot(ctx, blobName, imageName) snapshotID, err := u.importSnapshot(ctx, blobName, imageName)
@ -163,14 +163,14 @@ func (u *Uploader) ensureBucket(ctx context.Context) error {
Bucket: &u.bucketName, Bucket: &u.bucketName,
}) })
if err == nil { if err == nil {
u.log.Debug("Bucket %s exists", u.bucketName) u.log.Debug(fmt.Sprintf("Bucket %s exists", u.bucketName))
return nil return nil
} }
var noSuchBucketErr *types.NoSuchBucket var noSuchBucketErr *types.NoSuchBucket
if !errors.As(err, &noSuchBucketErr) { if !errors.As(err, &noSuchBucketErr) {
return fmt.Errorf("determining if bucket %s exists: %w", u.bucketName, err) return fmt.Errorf("determining if bucket %s exists: %w", u.bucketName, err)
} }
u.log.Debug("Creating bucket %s", u.bucketName) u.log.Debug(fmt.Sprintf("Creating bucket %s", u.bucketName))
_, err = s3C.CreateBucket(ctx, &s3.CreateBucketInput{ _, err = s3C.CreateBucket(ctx, &s3.CreateBucketInput{
Bucket: &u.bucketName, Bucket: &u.bucketName,
}) })
@ -181,7 +181,7 @@ func (u *Uploader) ensureBucket(ctx context.Context) error {
} }
func (u *Uploader) uploadBlob(ctx context.Context, blobName string, img io.Reader) error { func (u *Uploader) uploadBlob(ctx context.Context, blobName string, img io.Reader) error {
u.log.Debug("Uploading os image as %s", blobName) u.log.Debug(fmt.Sprintf("Uploading os image as %s", blobName))
uploadC, err := u.s3uploader(ctx, u.region) uploadC, err := u.s3uploader(ctx, u.region)
if err != nil { if err != nil {
return err return err
@ -212,7 +212,7 @@ func (u *Uploader) ensureBlobDeleted(ctx context.Context, blobName string) error
if err != nil { if err != nil {
return err return err
} }
u.log.Debug("Deleting blob %s", blobName) u.log.Debug(fmt.Sprintf("Deleting blob %s", blobName))
_, err = s3C.DeleteObject(ctx, &s3.DeleteObjectInput{ _, err = s3C.DeleteObject(ctx, &s3.DeleteObjectInput{
Bucket: &u.bucketName, Bucket: &u.bucketName,
Key: &blobName, Key: &blobName,
@ -272,7 +272,7 @@ func (u *Uploader) importSnapshot(ctx context.Context, blobName, snapshotName st
if importResp.ImportTaskId == nil { if importResp.ImportTaskId == nil {
return "", fmt.Errorf("importing snapshot: no import task ID returned") return "", fmt.Errorf("importing snapshot: no import task ID returned")
} }
u.log.Debug("Waiting for snapshot %s to be ready", snapshotName) u.log.Debug(fmt.Sprintf("Waiting for snapshot %s to be ready", snapshotName))
return waitForSnapshotImport(ctx, ec2C, *importResp.ImportTaskId) return waitForSnapshotImport(ctx, ec2C, *importResp.ImportTaskId)
} }
@ -464,7 +464,7 @@ func (u *Uploader) ensureImageDeleted(ctx context.Context, imageName, region str
} }
snapshotID, err := getBackingSnapshotID(ctx, ec2C, amiID) snapshotID, err := getBackingSnapshotID(ctx, ec2C, amiID)
if err == errAMIDoesNotExist { if err == errAMIDoesNotExist {
u.log.Debug("Image %s doesn't exist. Nothing to clean up.", amiID) u.log.Debug(fmt.Sprintf("Image %s doesn't exist. Nothing to clean up.", amiID))
return nil return nil
} }
u.log.Debug("Deleting image %s in %s with backing snapshot", amiID, region) u.log.Debug("Deleting image %s in %s with backing snapshot", amiID, region)

View file

@ -345,10 +345,10 @@ func (u *Uploader) ensureSIG(ctx context.Context, sigName string) error {
func (u *Uploader) ensureImageDefinition(ctx context.Context, sigName, definitionName string, version versionsapi.Version, attestationVariant string) error { func (u *Uploader) ensureImageDefinition(ctx context.Context, sigName, definitionName string, version versionsapi.Version, attestationVariant string) error {
_, err := u.image.Get(ctx, u.resourceGroup, sigName, definitionName, &armcomputev5.GalleryImagesClientGetOptions{}) _, err := u.image.Get(ctx, u.resourceGroup, sigName, definitionName, &armcomputev5.GalleryImagesClientGetOptions{})
if err == nil { if err == nil {
u.log.Debug("Image definition %s/%s in %s exists", sigName, definitionName, u.resourceGroup) u.log.Debug(fmt.Sprintf("Image definition %s/%s in %s exists", sigName, definitionName, u.resourceGroup))
return nil return nil
} }
u.log.Debug("Creating image definition %s/%s in %s", sigName, definitionName, u.resourceGroup) u.log.Debug(fmt.Sprintf("Creating image definition %s/%s in %s", sigName, definitionName, u.resourceGroup))
var securityType string var securityType string
// TODO(malt3): This needs to allow the *Supported or the normal variant // TODO(malt3): This needs to allow the *Supported or the normal variant
// based on wether a VMGS was provided or not. // based on whether a VMGS was provided or not.
@ -472,7 +472,7 @@ func (u *Uploader) getImageReference(ctx context.Context, sigName, definitionNam
return "", fmt.Errorf("image gallery %s in %s is a community gallery but has no public names", sigName, u.resourceGroup) return "", fmt.Errorf("image gallery %s in %s is a community gallery but has no public names", sigName, u.resourceGroup)
} }
communityGalleryName := *galleryResp.Properties.SharingProfile.CommunityGalleryInfo.PublicNames[0] communityGalleryName := *galleryResp.Properties.SharingProfile.CommunityGalleryInfo.PublicNames[0]
u.log.Debug("Image gallery %s in %s is shared. Using community identifier in %s", sigName, u.resourceGroup, communityGalleryName) u.log.Debug(fmt.Sprintf("Image gallery %s in %s is shared. Using community identifier in %s", sigName, u.resourceGroup, communityGalleryName))
communityVersionResp, err := u.communityVersions.Get(ctx, u.location, communityGalleryName, communityVersionResp, err := u.communityVersions.Get(ctx, u.location, communityGalleryName,
definitionName, versionName, definitionName, versionName,
&armcomputev5.CommunityGalleryImageVersionsClientGetOptions{}, &armcomputev5.CommunityGalleryImageVersionsClientGetOptions{},

View file

@ -98,13 +98,13 @@ func (u *Uploader) Upload(ctx context.Context, req *osimage.UploadRequest) ([]ve
func (u *Uploader) ensureBucket(ctx context.Context) error { func (u *Uploader) ensureBucket(ctx context.Context) error {
_, err := u.bucket.Attrs(ctx) _, err := u.bucket.Attrs(ctx)
if err == nil { if err == nil {
u.log.Debug("Bucket %s exists", u.bucketName) u.log.Debug(fmt.Sprintf("Bucket %s exists", u.bucketName))
return nil return nil
} }
if err != storage.ErrBucketNotExist { if err != storage.ErrBucketNotExist {
return err return err
} }
u.log.Debug("Creating bucket %s", u.bucketName) u.log.Debug(fmt.Sprintf("Creating bucket %s", u.bucketName))
return u.bucket.Create(ctx, u.project, &storage.BucketAttrs{ return u.bucket.Create(ctx, u.project, &storage.BucketAttrs{
PublicAccessPrevention: storage.PublicAccessPreventionEnforced, PublicAccessPrevention: storage.PublicAccessPreventionEnforced,
Location: u.location, Location: u.location,
@ -112,7 +112,7 @@ func (u *Uploader) ensureBucket(ctx context.Context) error {
} }
func (u *Uploader) uploadBlob(ctx context.Context, blobName string, img io.Reader) error { func (u *Uploader) uploadBlob(ctx context.Context, blobName string, img io.Reader) error {
u.log.Debug("Uploading os image as %s", blobName) u.log.Debug(fmt.Sprintf("Uploading os image as %s", blobName))
writer := u.bucket.Object(blobName).NewWriter(ctx) writer := u.bucket.Object(blobName).NewWriter(ctx)
_, err := io.Copy(writer, img) _, err := io.Copy(writer, img)
if err != nil { if err != nil {
@ -130,12 +130,12 @@ func (u *Uploader) ensureBlobDeleted(ctx context.Context, blobName string) error
if err != nil { if err != nil {
return err return err
} }
u.log.Debug("Deleting blob %s", blobName) u.log.Debug(fmt.Sprintf("Deleting blob %s", blobName))
return u.bucket.Object(blobName).Delete(ctx) return u.bucket.Object(blobName).Delete(ctx)
} }
func (u *Uploader) createImage(ctx context.Context, version versionsapi.Version, imageName, blobName string, enableSecureBoot bool, sbDatabase secureboot.Database) (string, error) { func (u *Uploader) createImage(ctx context.Context, version versionsapi.Version, imageName, blobName string, enableSecureBoot bool, sbDatabase secureboot.Database) (string, error) {
u.log.Debug("Creating image %s", imageName) u.log.Debug(fmt.Sprintf("Creating image %s", imageName))
blobURL := u.blobURL(blobName) blobURL := u.blobURL(blobName)
family := u.imageFamily(version) family := u.imageFamily(version)
var initialState *computepb.InitialStateConfig var initialState *computepb.InitialStateConfig
@ -206,10 +206,10 @@ func (u *Uploader) ensureImageDeleted(ctx context.Context, imageName string) err
Project: u.project, Project: u.project,
}) })
if err != nil { if err != nil {
u.log.Debug("Image %s doesn't exist. Nothing to clean up.", imageName) u.log.Debug(fmt.Sprintf("Image %s doesn't exist. Nothing to clean up.", imageName))
return nil return nil
} }
u.log.Debug("Deleting image %s", imageName) u.log.Debug(fmt.Sprintf("Deleting image %s", imageName))
op, err := u.image.Delete(ctx, &computepb.DeleteImageRequest{ op, err := u.image.Delete(ctx, &computepb.DeleteImageRequest{
Image: imageName, Image: imageName,
Project: u.project, Project: u.project,

View file

@ -10,9 +10,9 @@ package nop
import ( import (
"context" "context"
"log/slog" "log/slog"
"fmt"
"github.com/edgelesssys/constellation/v2/internal/api/versionsapi" "github.com/edgelesssys/constellation/v2/internal/api/versionsapi"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/edgelesssys/constellation/v2/internal/osimage" "github.com/edgelesssys/constellation/v2/internal/osimage"
) )
@ -28,6 +28,6 @@ func New(log *slog.Logger) *Uploader {
// Upload pretends to upload images to a csp. // Upload pretends to upload images to a csp.
func (u *Uploader) Upload(_ context.Context, req *osimage.UploadRequest) ([]versionsapi.ImageInfoEntry, error) { func (u *Uploader) Upload(_ context.Context, req *osimage.UploadRequest) ([]versionsapi.ImageInfoEntry, error) {
u.log.Debug("Skipping image upload of %s since this CSP does not require images to be uploaded in advance.", req.Version.ShortPath()) u.log.Debug(fmt.Sprintf("Skipping image upload of %s since this CSP does not require images to be uploaded in advance.", req.Version.ShortPath()))
return nil, nil return nil, nil
} }

View file

@ -8,6 +8,7 @@ package verify
import ( import (
"strings" "strings"
"testing" "testing"
"log/slog"
"github.com/edgelesssys/constellation/v2/internal/attestation/snp/testdata" "github.com/edgelesssys/constellation/v2/internal/attestation/snp/testdata"
"github.com/edgelesssys/constellation/v2/internal/logger" "github.com/edgelesssys/constellation/v2/internal/logger"

View file

@ -16,6 +16,7 @@ import (
"path/filepath" "path/filepath"
"strconv" "strconv"
"time" "time"
"fmt"
"github.com/edgelesssys/constellation/v2/internal/atls" "github.com/edgelesssys/constellation/v2/internal/atls"
"github.com/edgelesssys/constellation/v2/internal/attestation/variant" "github.com/edgelesssys/constellation/v2/internal/attestation/variant"
@ -129,7 +130,7 @@ func main() {
defer watcher.Close() defer watcher.Close()
go func() { go func() {
log.Info("starting file watcher for measurements file %s", filepath.Join(constants.ServiceBasePath, constants.AttestationConfigFilename)) log.Info(fmt.Sprintf("starting file watcher for measurements file %s", filepath.Join(constants.ServiceBasePath, constants.AttestationConfigFilename)))
if err := watcher.Watch(filepath.Join(constants.ServiceBasePath, constants.AttestationConfigFilename)); err != nil { if err := watcher.Watch(filepath.Join(constants.ServiceBasePath, constants.AttestationConfigFilename)); err != nil {
log.With(slog.Any("error", err)).Error("Failed to watch measurements file") log.With(slog.Any("error", err)).Error("Failed to watch measurements file")
os.Exit(1) os.Exit(1)

View file

@ -16,7 +16,6 @@ import (
"github.com/edgelesssys/constellation/v2/internal/constants" "github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/file" "github.com/edgelesssys/constellation/v2/internal/file"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/spf13/afero" "github.com/spf13/afero"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"

View file

@ -13,8 +13,8 @@ import (
"testing" "testing"
"time" "time"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/edgelesssys/constellation/v2/internal/attestation" "github.com/edgelesssys/constellation/v2/internal/attestation"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/edgelesssys/constellation/v2/internal/versions/components" "github.com/edgelesssys/constellation/v2/internal/versions/components"
"github.com/edgelesssys/constellation/v2/joinservice/joinproto" "github.com/edgelesssys/constellation/v2/joinservice/joinproto"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"

View file

@ -79,7 +79,7 @@ func (u *Updatable) Update() error {
if err != nil { if err != nil {
return fmt.Errorf("unmarshaling config: %w", err) return fmt.Errorf("unmarshaling config: %w", err)
} }
u.log.Debug("New expected measurements: %+v", cfg.GetMeasurements()) u.log.Debug(fmt.Sprintf("New expected measurements: %+v", cfg.GetMeasurements()))
cfgWithCerts, err := u.configWithCerts(cfg) cfgWithCerts, err := u.configWithCerts(cfg)
if err != nil { if err != nil {

View file

@ -15,6 +15,7 @@ import (
"path/filepath" "path/filepath"
"strconv" "strconv"
"time" "time"
"fmt"
"github.com/edgelesssys/constellation/v2/internal/constants" "github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/crypto" "github.com/edgelesssys/constellation/v2/internal/crypto"
@ -46,7 +47,7 @@ func main() {
os.Exit(1) os.Exit(1)
} }
if len(masterKey) < crypto.MasterSecretLengthMin { if len(masterKey) < crypto.MasterSecretLengthMin {
log.With(slog.Any("error", errors.New("invalid key length"))).Error("Provided master secret is smaller than the required minimum of %d bytes", crypto.MasterSecretLengthMin) log.With(slog.Any("error", errors.New("invalid key length"))).Error(fmt.Sprintf("Provided master secret is smaller than the required minimum of %d bytes", crypto.MasterSecretLengthMin))
os.Exit(1) os.Exit(1)
} }
salt, err := file.Read(*saltPath) salt, err := file.Read(*saltPath)
@ -55,7 +56,7 @@ func main() {
os.Exit(1) os.Exit(1)
} }
if len(salt) < crypto.RNGLengthDefault { if len(salt) < crypto.RNGLengthDefault {
log.With(slog.Any("error", errors.New("invalid salt length"))).Error("Expected salt to be %d bytes, but got %d", crypto.RNGLengthDefault, len(salt)) log.With(slog.Any("error", errors.New("invalid salt length"))).Error(fmt.Sprintf("Expected salt to be %d bytes, but got %d", crypto.RNGLengthDefault, len(salt)))
os.Exit(1) os.Exit(1)
} }
masterSecret := uri.MasterSecret{Key: masterKey, Salt: salt} masterSecret := uri.MasterSecret{Key: masterKey, Salt: salt}
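The validation above logs errors with log.With(slog.Any("error", err)) and a fmt.Sprintf-rendered message. For reference, a small sketch of the two equivalent ways to attach the error as a structured attribute (the values are illustrative, not the package's constants):

package main

import (
	"errors"
	"fmt"
	"log/slog"
	"os"
)

func main() {
	log := slog.New(slog.NewJSONHandler(os.Stderr, nil))
	err := errors.New("invalid key length")
	minLen := 16 // illustrative minimum

	// Attach the error via With, message rendered with fmt.Sprintf.
	log.With(slog.Any("error", err)).Error(fmt.Sprintf("Provided master secret is smaller than the required minimum of %d bytes", minLen))

	// Or pass both the error and the limit as attributes directly.
	log.Error("provided master secret is too short", slog.Any("error", err), slog.Int("min_bytes", minLen))
}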

View file

@ -53,7 +53,7 @@ func (s *Server) Run(port string) error {
logger.ReplaceGRPCLogger(slog.New(logger.NewLevelHandler(slog.LevelWarn, s.log.Handler())).WithGroup("gRPC")) logger.ReplaceGRPCLogger(slog.New(logger.NewLevelHandler(slog.LevelWarn, s.log.Handler())).WithGroup("gRPC"))
// start the server // start the server
s.log.Info("Starting Constellation key management service on %s", listener.Addr().String()) s.log.Info(fmt.Sprintf("Starting Constellation key management service on %s", listener.Addr().String()))
return server.Serve(listener) return server.Serve(listener)
} }
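logger.ReplaceGRPCLogger above wraps the service logger in a handler pinned to slog.LevelWarn before handing it to gRPC. A minimal sketch of such a level-raising handler, assuming only the standard slog.Handler interface (the repository's NewLevelHandler may differ in detail):

package logsketch // illustrative only

import (
	"context"
	"log/slog"
)

// levelHandler wraps another handler and drops every record below min.
type levelHandler struct {
	min   slog.Level
	inner slog.Handler
}

func newLevelHandler(min slog.Level, inner slog.Handler) slog.Handler {
	return &levelHandler{min: min, inner: inner}
}

func (h *levelHandler) Enabled(ctx context.Context, level slog.Level) bool {
	return level >= h.min && h.inner.Enabled(ctx, level)
}

func (h *levelHandler) Handle(ctx context.Context, r slog.Record) error {
	return h.inner.Handle(ctx, r)
}

func (h *levelHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
	return &levelHandler{min: h.min, inner: h.inner.WithAttrs(attrs)}
}

func (h *levelHandler) WithGroup(name string) slog.Handler {
	return &levelHandler{min: h.min, inner: h.inner.WithGroup(name)}
}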

View file

@ -15,7 +15,6 @@ import (
"fmt" "fmt"
"log/slog" "log/slog"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/edgelesssys/constellation/v2/keyservice/keyserviceproto" "github.com/edgelesssys/constellation/v2/keyservice/keyserviceproto"
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/credentials/insecure"

View file

@ -13,7 +13,6 @@ import (
"log/slog" "log/slog"
"net/http" "net/http"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/edgelesssys/constellation/v2/s3proxy/internal/s3" "github.com/edgelesssys/constellation/v2/s3proxy/internal/s3"
) )
@ -151,41 +150,41 @@ func handleForwards(log *slog.Logger) http.HandlerFunc {
} }
// handleCreateMultipartUpload logs the request and blocks with an error message. // handleCreateMultipartUpload logs the request and blocks with an error message.
func handleCreateMultipartUpload(log *logger.Logger) http.HandlerFunc { func handleCreateMultipartUpload(log *slog.Logger) http.HandlerFunc {
return func(w http.ResponseWriter, req *http.Request) { return func(w http.ResponseWriter, req *http.Request) {
log.With(slog.String("path", req.URL.Path), slog.String("method", req.Method), slog.String("host", req.Host)).Debugf("intercepting CreateMultipartUpload") log.With(slog.String("path", req.URL.Path), slog.String("method", req.Method), slog.String("host", req.Host)).Debug("intercepting CreateMultipartUpload")
log.Errorf("Blocking CreateMultipartUpload request") log.Error("Blocking CreateMultipartUpload request")
http.Error(w, "s3proxy is configured to block CreateMultipartUpload requests", http.StatusNotImplemented) http.Error(w, "s3proxy is configured to block CreateMultipartUpload requests", http.StatusNotImplemented)
} }
} }
// handleUploadPart logs the request and blocks with an error message. // handleUploadPart logs the request and blocks with an error message.
func handleUploadPart(log *logger.Logger) http.HandlerFunc { func handleUploadPart(log *slog.Logger) http.HandlerFunc {
return func(w http.ResponseWriter, req *http.Request) { return func(w http.ResponseWriter, req *http.Request) {
log.With(slog.String("path", req.URL.Path), slog.String("method", req.Method), slog.String("host", req.Host)).Debugf("intercepting UploadPart") log.With(slog.String("path", req.URL.Path), slog.String("method", req.Method), slog.String("host", req.Host)).Debug("intercepting UploadPart")
log.Errorf("Blocking UploadPart request") log.Error("Blocking UploadPart request")
http.Error(w, "s3proxy is configured to block UploadPart requests", http.StatusNotImplemented) http.Error(w, "s3proxy is configured to block UploadPart requests", http.StatusNotImplemented)
} }
} }
// handleCompleteMultipartUpload logs the request and blocks with an error message. // handleCompleteMultipartUpload logs the request and blocks with an error message.
func handleCompleteMultipartUpload(log *logger.Logger) http.HandlerFunc { func handleCompleteMultipartUpload(log *slog.Logger) http.HandlerFunc {
return func(w http.ResponseWriter, req *http.Request) { return func(w http.ResponseWriter, req *http.Request) {
log.With(slog.String("path", req.URL.Path), slog.String("method", req.Method), slog.String("host", req.Host)).Debugf("intercepting CompleteMultipartUpload") log.With(slog.String("path", req.URL.Path), slog.String("method", req.Method), slog.String("host", req.Host)).Debug("intercepting CompleteMultipartUpload")
log.Errorf("Blocking CompleteMultipartUpload request") log.Error("Blocking CompleteMultipartUpload request")
http.Error(w, "s3proxy is configured to block CompleteMultipartUpload requests", http.StatusNotImplemented) http.Error(w, "s3proxy is configured to block CompleteMultipartUpload requests", http.StatusNotImplemented)
} }
} }
// handleAbortMultipartUpload logs the request and blocks with an error message. // handleAbortMultipartUpload logs the request and blocks with an error message.
func handleAbortMultipartUpload(log *logger.Logger) http.HandlerFunc { func handleAbortMultipartUpload(log *slog.Logger) http.HandlerFunc {
return func(w http.ResponseWriter, req *http.Request) { return func(w http.ResponseWriter, req *http.Request) {
log.With(slog.String("path", req.URL.Path), slog.String("method", req.Method), slog.String("host", req.Host)).Debugf("intercepting AbortMultipartUpload") log.With(slog.String("path", req.URL.Path), slog.String("method", req.Method), slog.String("host", req.Host)).Debug("intercepting AbortMultipartUpload")
log.Errorf("Blocking AbortMultipartUpload request") log.Error("Blocking AbortMultipartUpload request")
http.Error(w, "s3proxy is configured to block AbortMultipartUpload requests", http.StatusNotImplemented) http.Error(w, "s3proxy is configured to block AbortMultipartUpload requests", http.StatusNotImplemented)
} }
} }

View file

@ -34,7 +34,6 @@ import (
"strings" "strings"
"time" "time"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/edgelesssys/constellation/v2/s3proxy/internal/kms" "github.com/edgelesssys/constellation/v2/s3proxy/internal/kms"
"github.com/edgelesssys/constellation/v2/s3proxy/internal/s3" "github.com/edgelesssys/constellation/v2/s3proxy/internal/s3"
) )
@ -58,7 +57,7 @@ type Router struct {
// s3proxy does not implement those yet. // s3proxy does not implement those yet.
// Setting forwardMultipartReqs to true will forward those requests to the S3 API, otherwise we block them (secure defaults). // Setting forwardMultipartReqs to true will forward those requests to the S3 API, otherwise we block them (secure defaults).
forwardMultipartReqs bool forwardMultipartReqs bool
log *logger.Logger log *slog.Logger
} }
// New creates a new Router. // New creates a new Router.

View file

@ -1240,15 +1240,15 @@ type tfContextLogger struct {
ctx context.Context // bind context to struct to satisfy interface ctx context.Context // bind context to struct to satisfy interface
} }
func (l *tfContextLogger) Debugf(format string, args ...any) { func (l *tfContextLogger) Debug(format string, args ...any) {
tflog.Debug(l.ctx, fmt.Sprintf(format, args...)) tflog.Debug(l.ctx, fmt.Sprintf(format, args...))
} }
func (l *tfContextLogger) Infof(format string, args ...any) { func (l *tfContextLogger) Info(format string, args ...any) {
tflog.Info(l.ctx, fmt.Sprintf(format, args...)) tflog.Info(l.ctx, fmt.Sprintf(format, args...))
} }
func (l *tfContextLogger) Warnf(format string, args ...any) { func (l *tfContextLogger) Warn(format string, args ...any) {
tflog.Warn(l.ctx, fmt.Sprintf(format, args...)) tflog.Warn(l.ctx, fmt.Sprintf(format, args...))
} }

View file

@ -101,7 +101,7 @@ func (s *Server) Stop() {
// ExecuteUpdate installs & verifies the provided kubeadm, then executes `kubeadm upgrade plan` & `kubeadm upgrade apply {wanted_Kubernetes_Version}` to upgrade to the specified version. // ExecuteUpdate installs & verifies the provided kubeadm, then executes `kubeadm upgrade plan` & `kubeadm upgrade apply {wanted_Kubernetes_Version}` to upgrade to the specified version.
func (s *Server) ExecuteUpdate(ctx context.Context, updateRequest *upgradeproto.ExecuteUpdateRequest) (*upgradeproto.ExecuteUpdateResponse, error) { func (s *Server) ExecuteUpdate(ctx context.Context, updateRequest *upgradeproto.ExecuteUpdateRequest) (*upgradeproto.ExecuteUpdateResponse, error) {
s.log.Info("Upgrade to Kubernetes version started: %s", updateRequest.WantedKubernetesVersion) s.log.Info(fmt.Sprintf("Upgrade to Kubernetes version started: %s", updateRequest.WantedKubernetesVersion))
installer := installer.NewOSInstaller() installer := installer.NewOSInstaller()
err := prepareUpdate(ctx, installer, updateRequest) err := prepareUpdate(ctx, installer, updateRequest)
@ -121,7 +121,7 @@ func (s *Server) ExecuteUpdate(ctx context.Context, updateRequest *upgradeproto.
return nil, status.Errorf(codes.Internal, "unable to execute kubeadm upgrade apply: %s: %s", err, string(out)) return nil, status.Errorf(codes.Internal, "unable to execute kubeadm upgrade apply: %s: %s", err, string(out))
} }
s.log.Info("Upgrade to Kubernetes version succeeded: %s", updateRequest.WantedKubernetesVersion) s.log.Info(fmt.Sprintf("Upgrade to Kubernetes version succeeded: %s", updateRequest.WantedKubernetesVersion))
return &upgradeproto.ExecuteUpdateResponse{}, nil return &upgradeproto.ExecuteUpdateResponse{}, nil
} }

View file

@ -20,8 +20,8 @@ import (
"testing" "testing"
"github.com/edgelesssys/constellation/v2/internal/grpc/testdialer" "github.com/edgelesssys/constellation/v2/internal/grpc/testdialer"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/edgelesssys/constellation/v2/verify/verifyproto" "github.com/edgelesssys/constellation/v2/verify/verifyproto"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"go.uber.org/goleak" "go.uber.org/goleak"