From 75ceeb2de8638a7cc9dddfe3d715dd117e22a819 Mon Sep 17 00:00:00 2001 From: miampf Date: Wed, 3 Apr 2024 13:49:03 +0000 Subject: [PATCH] logging: unify debug log message format (#2997) --- cli/internal/cmd/apply.go | 12 +++--- cli/internal/cmd/applyhelm.go | 2 +- cli/internal/cmd/applyinit.go | 6 +-- cli/internal/cmd/configfetchmeasurements.go | 6 +-- cli/internal/cmd/configgenerate.go | 4 +- cli/internal/cmd/iamcreate.go | 4 +- cli/internal/cmd/init.go | 4 +- cli/internal/cmd/maapatch.go | 2 +- cli/internal/cmd/recover.go | 12 +++--- cli/internal/cmd/upgradecheck.go | 32 ++++++++-------- cli/internal/cmd/verify.go | 4 +- e2e/malicious-join/malicious-join.go | 6 +-- hack/bazel-deps-mirror/check.go | 10 ++--- hack/bazel-deps-mirror/fix.go | 12 +++--- .../internal/mirror/mirror.go | 20 +++++----- hack/bazel-deps-mirror/upgrade.go | 10 ++--- hack/oci-pin/codegen.go | 6 +-- hack/oci-pin/merge.go | 2 +- hack/oci-pin/sum.go | 6 +-- image/upload/internal/cmd/info.go | 2 +- .../internal/cmd/measurementsenvelope.go | 2 +- .../upload/internal/cmd/measurementsmerge.go | 2 +- .../upload/internal/cmd/measurementsupload.go | 2 +- image/upload/internal/cmd/uplosi.go | 4 +- internal/api/client/client.go | 12 +++--- internal/api/versionsapi/cli/add.go | 7 ++-- internal/api/versionsapi/cli/latest.go | 2 +- internal/api/versionsapi/cli/list.go | 3 +- internal/api/versionsapi/cli/rm.go | 38 ++++++++++--------- internal/api/versionsapi/client.go | 20 +++++----- .../attestation/measurements/measurements.go | 10 +++++ internal/constellation/apply.go | 4 +- internal/constellation/applyinit.go | 16 ++++---- internal/constellation/helm/actionfactory.go | 10 ++--- internal/constellation/helm/retryaction.go | 6 +-- internal/constellation/kubecmd/backup.go | 4 +- internal/constellation/kubecmd/kubecmd.go | 10 ++--- internal/grpc/grpclog/grpclog.go | 6 +-- internal/grpc/grpclog/grpclog_test.go | 10 ++--- internal/osimage/archive/archive.go | 2 +- internal/osimage/imageinfo/imageinfo.go | 2 +- .../measurementsuploader.go | 2 +- internal/osimage/nop/nop.go | 2 +- internal/staticupload/staticupload.go | 4 +- internal/verify/verify.go | 2 +- .../internal/certcache/amdkds/amdkds_test.go | 2 +- joinservice/internal/certcache/certcache.go | 4 +- joinservice/internal/watcher/validator.go | 2 +- 48 files changed, 183 insertions(+), 169 deletions(-) diff --git a/cli/internal/cmd/apply.go b/cli/internal/cmd/apply.go index 0c524302e..2db2f318c 100644 --- a/cli/internal/cmd/apply.go +++ b/cli/internal/cmd/apply.go @@ -449,7 +449,7 @@ func (a *applyCmd) apply( func (a *applyCmd) validateInputs(cmd *cobra.Command, configFetcher attestationconfigapi.Fetcher) (*config.Config, *state.State, error) { // Read user's config and state file - a.log.Debug(fmt.Sprintf("Reading config from %s", a.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename))) + a.log.Debug(fmt.Sprintf("Reading config from %q", a.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename))) conf, err := config.New(a.fileHandler, constants.ConfigFilename, configFetcher, a.flags.force) var configValidationErr *config.ValidationError if errors.As(err, &configValidationErr) { @@ -459,7 +459,7 @@ func (a *applyCmd) validateInputs(cmd *cobra.Command, configFetcher attestationc return nil, nil, err } - a.log.Debug(fmt.Sprintf("Reading state file from %s", a.flags.pathPrefixer.PrefixPrintablePath(constants.StateFilename))) + a.log.Debug(fmt.Sprintf("Reading state file from %q", a.flags.pathPrefixer.PrefixPrintablePath(constants.StateFilename))) stateFile, err 
:= state.CreateOrRead(a.fileHandler, constants.StateFilename) if err != nil { return nil, nil, err @@ -528,10 +528,10 @@ func (a *applyCmd) validateInputs(cmd *cobra.Command, configFetcher attestationc // If we need to run the init RPC, the version has to be valid // Otherwise, we are able to use an outdated version, meaning we skip the K8s upgrade // We skip version validation if the user explicitly skips the Kubernetes phase - a.log.Debug(fmt.Sprintf("Validating Kubernetes version %s", conf.KubernetesVersion)) + a.log.Debug(fmt.Sprintf("Validating Kubernetes version %q", conf.KubernetesVersion)) validVersion, err := versions.NewValidK8sVersion(string(conf.KubernetesVersion), true) if err != nil { - a.log.Debug(fmt.Sprintf("Kubernetes version not valid: %s", err)) + a.log.Debug(fmt.Sprintf("Kubernetes version not valid: %q", err)) if !a.flags.skipPhases.contains(skipInitPhase) { return nil, nil, err } @@ -570,7 +570,7 @@ func (a *applyCmd) validateInputs(cmd *cobra.Command, configFetcher attestationc cmd.PrintErrf("Warning: Constellation with Kubernetes %s is still in preview. Use only for evaluation purposes.\n", validVersion) } conf.KubernetesVersion = validVersion - a.log.Debug(fmt.Sprintf("Target Kubernetes version set to %s", conf.KubernetesVersion)) + a.log.Debug(fmt.Sprintf("Target Kubernetes version set to %q", conf.KubernetesVersion)) // Validate microservice version (helm versions) in the user's config matches the version of the CLI // This makes sure we catch potential errors early, not just after we already ran Terraform migrations or the init RPC @@ -598,7 +598,7 @@ func (a *applyCmd) applyJoinConfig(cmd *cobra.Command, newConfig config.Attestat ) error { clusterAttestationConfig, err := a.applier.GetClusterAttestationConfig(cmd.Context(), newConfig.GetVariant()) if err != nil { - a.log.Debug(fmt.Sprintf("Getting cluster attestation config failed: %s", err)) + a.log.Debug(fmt.Sprintf("Getting cluster attestation config failed: %q", err)) if k8serrors.IsNotFound(err) { a.log.Debug("Creating new join config") return a.applier.ApplyJoinConfig(cmd.Context(), newConfig, measurementSalt) diff --git a/cli/internal/cmd/applyhelm.go b/cli/internal/cmd/applyhelm.go index b9e1538d6..bd629d348 100644 --- a/cli/internal/cmd/applyhelm.go +++ b/cli/internal/cmd/applyhelm.go @@ -120,7 +120,7 @@ func (a *applyCmd) backupHelmCharts( if err := executor.SaveCharts(chartDir, a.fileHandler); err != nil { return fmt.Errorf("saving Helm charts to disk: %w", err) } - a.log.Debug(fmt.Sprintf("Helm charts saved to %s", a.flags.pathPrefixer.PrefixPrintablePath(chartDir))) + a.log.Debug(fmt.Sprintf("Helm charts saved to %q", a.flags.pathPrefixer.PrefixPrintablePath(chartDir))) if includesUpgrades { a.log.Debug("Creating backup of CRDs and CRs") diff --git a/cli/internal/cmd/applyinit.go b/cli/internal/cmd/applyinit.go index 34ab7f1a9..e3e99b0b4 100644 --- a/cli/internal/cmd/applyinit.go +++ b/cli/internal/cmd/applyinit.go @@ -29,7 +29,7 @@ import ( // On success, it writes the Kubernetes admin config file to disk. // Therefore it is skipped if the Kubernetes admin config file already exists. 
func (a *applyCmd) runInit(cmd *cobra.Command, conf *config.Config, stateFile *state.State) (*bytes.Buffer, error) { - a.log.Debug(fmt.Sprintf("Creating aTLS Validator for %s", conf.GetAttestationConfig().GetVariant())) + a.log.Debug(fmt.Sprintf("Creating aTLS Validator for %q", conf.GetAttestationConfig().GetVariant())) validator, err := choose.Validator(conf.GetAttestationConfig(), a.wLog) if err != nil { return nil, fmt.Errorf("creating validator: %w", err) @@ -121,7 +121,7 @@ func (a *applyCmd) writeInitOutput( if err := a.fileHandler.Write(constants.AdminConfFilename, initResp.Kubeconfig, file.OptNone); err != nil { return fmt.Errorf("writing kubeconfig: %w", err) } - a.log.Debug(fmt.Sprintf("Kubeconfig written to %s", a.flags.pathPrefixer.PrefixPrintablePath(constants.AdminConfFilename))) + a.log.Debug(fmt.Sprintf("Kubeconfig written to %q", a.flags.pathPrefixer.PrefixPrintablePath(constants.AdminConfFilename))) if mergeConfig { if err := a.merger.mergeConfigs(constants.AdminConfFilename, a.fileHandler); err != nil { @@ -136,7 +136,7 @@ func (a *applyCmd) writeInitOutput( return fmt.Errorf("writing Constellation state file: %w", err) } - a.log.Debug(fmt.Sprintf("Constellation state file written to %s", a.flags.pathPrefixer.PrefixPrintablePath(constants.StateFilename))) + a.log.Debug(fmt.Sprintf("Constellation state file written to %q", a.flags.pathPrefixer.PrefixPrintablePath(constants.StateFilename))) if !mergeConfig { fmt.Fprintln(wr, "You can now connect to your cluster by executing:") diff --git a/cli/internal/cmd/configfetchmeasurements.go b/cli/internal/cmd/configfetchmeasurements.go index 04af8632c..83a8e55c2 100644 --- a/cli/internal/cmd/configfetchmeasurements.go +++ b/cli/internal/cmd/configfetchmeasurements.go @@ -104,7 +104,7 @@ func runConfigFetchMeasurements(cmd *cobra.Command, _ []string) error { if err := cfm.flags.parse(cmd.Flags()); err != nil { return fmt.Errorf("parsing flags: %w", err) } - cfm.log.Debug(fmt.Sprintf("Using flags %+v", cfm.flags)) + cfm.log.Debug("Using flags", "insecure", cfm.flags.insecure, "measurementsURL", cfm.flags.measurementsURL, "signatureURL", cfm.flags.signatureURL) fetcher := attestationconfigapi.NewFetcherWithClient(http.DefaultClient, constants.CDNRepositoryURL) return cfm.configFetchMeasurements(cmd, fileHandler, fetcher) @@ -152,14 +152,14 @@ func (cfm *configFetchMeasurementsCmd) configFetchMeasurements( return fmt.Errorf("fetching and verifying measurements: %w", err) } } - cfm.log.Debug(fmt.Sprintf("Measurements: %#v\n", fetchedMeasurements)) + cfm.log.Debug(fmt.Sprintf("Measurements: %s", fetchedMeasurements.String())) cfm.log.Debug("Updating measurements in configuration") conf.UpdateMeasurements(fetchedMeasurements) if err := fileHandler.WriteYAML(constants.ConfigFilename, conf, file.OptOverwrite); err != nil { return err } - cfm.log.Debug(fmt.Sprintf("Configuration written to %s", cfm.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename))) + cfm.log.Debug(fmt.Sprintf("Configuration written to %q", cfm.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename))) cmd.Print("Successfully fetched measurements and updated Configuration\n") return nil } diff --git a/cli/internal/cmd/configgenerate.go b/cli/internal/cmd/configgenerate.go index cfbe10b59..4fabe40e3 100644 --- a/cli/internal/cmd/configgenerate.go +++ b/cli/internal/cmd/configgenerate.go @@ -85,13 +85,13 @@ func runConfigGenerate(cmd *cobra.Command, args []string) error { if err := cg.flags.parse(cmd.Flags()); err != nil { return fmt.Errorf("parsing 
flags: %w", err) } - log.Debug(fmt.Sprintf("Parsed flags as %+v", cg.flags)) + log.Debug("Using flags", "k8sVersion", cg.flags.k8sVersion, "attestationVariant", cg.flags.attestationVariant) return cg.configGenerate(cmd, fileHandler, provider, args[0]) } func (cg *configGenerateCmd) configGenerate(cmd *cobra.Command, fileHandler file.Handler, provider cloudprovider.Provider, rawProvider string) error { - cg.log.Debug(fmt.Sprintf("Using cloud provider %s", provider.String())) + cg.log.Debug(fmt.Sprintf("Using cloud provider %q", provider.String())) // Config creation conf, err := createConfigWithAttestationVariant(provider, rawProvider, cg.flags.attestationVariant) diff --git a/cli/internal/cmd/iamcreate.go b/cli/internal/cmd/iamcreate.go index 4067b33b0..b2e44f4a2 100644 --- a/cli/internal/cmd/iamcreate.go +++ b/cli/internal/cmd/iamcreate.go @@ -133,7 +133,7 @@ func (c *iamCreator) create(ctx context.Context) error { var conf config.Config if c.flags.updateConfig { - c.log.Debug(fmt.Sprintf("Parsing config %s", c.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename))) + c.log.Debug(fmt.Sprintf("Parsing config %q", c.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename))) if err := c.fileHandler.ReadYAML(constants.ConfigFilename, &conf); err != nil { return fmt.Errorf("error reading the configuration file: %w", err) } @@ -161,7 +161,7 @@ func (c *iamCreator) create(ctx context.Context) error { } if c.flags.updateConfig { - c.log.Debug(fmt.Sprintf("Writing IAM configuration to %s", c.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename))) + c.log.Debug(fmt.Sprintf("Writing IAM configuration to %q", c.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename))) c.providerCreator.writeOutputValuesToConfig(&conf, iamFile) if err := c.fileHandler.WriteYAML(constants.ConfigFilename, conf, file.OptOverwrite); err != nil { return err diff --git a/cli/internal/cmd/init.go b/cli/internal/cmd/init.go index 8075db901..a1e3e3632 100644 --- a/cli/internal/cmd/init.go +++ b/cli/internal/cmd/init.go @@ -82,7 +82,7 @@ func (c *kubeconfigMerger) mergeConfigs(configPath string, fileHandler file.Hand // Set the current context to the cluster we just created cfg.CurrentContext = constellConfig.CurrentContext - c.log.Debug(fmt.Sprintf("Set current context to %s", cfg.CurrentContext)) + c.log.Debug(fmt.Sprintf("Set current context to %q", cfg.CurrentContext)) json, err := runtime.Encode(clientcodec.Codec, cfg) if err != nil { @@ -97,7 +97,7 @@ func (c *kubeconfigMerger) mergeConfigs(configPath string, fileHandler file.Hand if err := fileHandler.Write(clientcmd.RecommendedHomeFile, mergedKubeconfig, file.OptOverwrite); err != nil { return fmt.Errorf("writing merged kubeconfig to file: %w", err) } - c.log.Debug(fmt.Sprintf("Merged kubeconfig into default config file: %s", clientcmd.RecommendedHomeFile)) + c.log.Debug(fmt.Sprintf("Merged kubeconfig into default config file: %q", clientcmd.RecommendedHomeFile)) return nil } diff --git a/cli/internal/cmd/maapatch.go b/cli/internal/cmd/maapatch.go index a32e8729a..bb7ea381a 100644 --- a/cli/internal/cmd/maapatch.go +++ b/cli/internal/cmd/maapatch.go @@ -56,7 +56,7 @@ func runPatchMAA(cmd *cobra.Command, args []string) error { } func (c *maaPatchCmd) patchMAA(cmd *cobra.Command, attestationURL string) error { - c.log.Debug(fmt.Sprintf("Using attestation URL %s", attestationURL)) + c.log.Debug(fmt.Sprintf("Using attestation URL %q", attestationURL)) if err := c.patcher.Patch(cmd.Context(), attestationURL); err != nil { return 
fmt.Errorf("patching MAA attestation policy: %w", err) diff --git a/cli/internal/cmd/recover.go b/cli/internal/cmd/recover.go index f3efc3e96..4541fd08d 100644 --- a/cli/internal/cmd/recover.go +++ b/cli/internal/cmd/recover.go @@ -84,7 +84,7 @@ func runRecover(cmd *cobra.Command, _ []string) error { if err := r.flags.parse(cmd.Flags()); err != nil { return err } - r.log.Debug(fmt.Sprintf("Using flags: %+v", r.flags)) + r.log.Debug("Using flags", "debug", r.flags.debug, "endpoint", r.flags.endpoint, "force", r.flags.force) return r.recover(cmd, fileHandler, 5*time.Second, &recoverDoer{log: r.log}, newDialer) } @@ -93,7 +93,7 @@ func (r *recoverCmd) recover( doer recoverDoerInterface, newDialer func(validator atls.Validator) *dialer.Dialer, ) error { var masterSecret uri.MasterSecret - r.log.Debug(fmt.Sprintf("Loading master secret file from %s", r.flags.pathPrefixer.PrefixPrintablePath(constants.MasterSecretFilename))) + r.log.Debug(fmt.Sprintf("Loading master secret file from %q", r.flags.pathPrefixer.PrefixPrintablePath(constants.MasterSecretFilename))) if err := fileHandler.ReadJSON(constants.MasterSecretFilename, &masterSecret); err != nil { return err } @@ -108,7 +108,7 @@ func (r *recoverCmd) recover( return err } - r.log.Debug(fmt.Sprintf("Got provider %s", conf.GetProvider())) + r.log.Debug(fmt.Sprintf("Got provider %q", conf.GetProvider())) if conf.GetProvider() == cloudprovider.Azure { interval = 20 * time.Second // Azure LB takes a while to remove unhealthy instances } @@ -129,14 +129,14 @@ func (r *recoverCmd) recover( conf.UpdateMAAURL(stateFile.Infrastructure.Azure.AttestationURL) } - r.log.Debug(fmt.Sprintf("Creating aTLS Validator for %s", conf.GetAttestationConfig().GetVariant())) + r.log.Debug(fmt.Sprintf("Creating aTLS Validator for %q", conf.GetAttestationConfig().GetVariant())) validator, err := choose.Validator(conf.GetAttestationConfig(), warnLogger{cmd: cmd, log: r.log}) if err != nil { return fmt.Errorf("creating new validator: %w", err) } r.log.Debug("Created a new validator") doer.setDialer(newDialer(validator), endpoint) - r.log.Debug(fmt.Sprintf("Set dialer for endpoint %s", endpoint)) + r.log.Debug(fmt.Sprintf("Set dialer for endpoint %q", endpoint)) doer.setURIs(masterSecret.EncodeToURI(), uri.NoStoreURI) r.log.Debug("Set secrets") if err := r.recoverCall(cmd.Context(), cmd.OutOrStdout(), interval, doer); err != nil { @@ -166,7 +166,7 @@ func (r *recoverCmd) recoverCall(ctx context.Context, out io.Writer, interval ti }) } - r.log.Debug(fmt.Sprintf("Encountered error (retriable: %t): %s", retry, err)) + r.log.Debug(fmt.Sprintf("Encountered error (retriable: %t): %q", retry, err)) return retry } diff --git a/cli/internal/cmd/upgradecheck.go b/cli/internal/cmd/upgradecheck.go index 74ec31e08..a782ebef2 100644 --- a/cli/internal/cmd/upgradecheck.go +++ b/cli/internal/cmd/upgradecheck.go @@ -187,7 +187,7 @@ func (u *upgradeCheckCmd) upgradeCheck(cmd *cobra.Command, fetcher attestationco // get current image version of the cluster csp := conf.GetProvider() attestationVariant := conf.GetAttestationConfig().GetVariant() - u.log.Debug(fmt.Sprintf("Using provider %s with attestation variant %s", csp.String(), attestationVariant.String())) + u.log.Debug(fmt.Sprintf("Using provider %q with attestation variant %q", csp.String(), attestationVariant.String())) current, err := u.collect.currentVersions(cmd.Context()) if err != nil { @@ -198,12 +198,12 @@ func (u *upgradeCheckCmd) upgradeCheck(cmd *cobra.Command, fetcher attestationco if err != nil { return err } - 
u.log.Debug(fmt.Sprintf("Current cli version: %s", current.cli)) - u.log.Debug(fmt.Sprintf("Supported cli version(s): %s", supported.cli)) - u.log.Debug(fmt.Sprintf("Current service version: %s", current.service)) - u.log.Debug(fmt.Sprintf("Supported service version: %s", supported.service)) - u.log.Debug(fmt.Sprintf("Current k8s version: %s", current.k8s)) - u.log.Debug(fmt.Sprintf("Supported k8s version(s): %s", supported.k8s)) + u.log.Debug(fmt.Sprintf("Current cli version: %q", current.cli)) + u.log.Debug(fmt.Sprintf("Supported cli version(s): %q", supported.cli)) + u.log.Debug(fmt.Sprintf("Current service version: %q", current.service)) + u.log.Debug(fmt.Sprintf("Supported service version: %q", supported.service)) + u.log.Debug(fmt.Sprintf("Current k8s version: %q", current.k8s)) + u.log.Debug(fmt.Sprintf("Supported k8s version(s): %q", supported.k8s)) // Filter versions to only include upgrades newServices := supported.service @@ -343,7 +343,7 @@ func (v *versionCollector) newMeasurements(ctx context.Context, csp cloudprovide // get expected measurements for each image upgrades := make(map[string]measurements.M) for _, version := range versions { - v.log.Debug(fmt.Sprintf("Fetching measurements for image: %s", version.Version())) + v.log.Debug(fmt.Sprintf("Fetching measurements for image: %q", version.Version())) shortPath := version.ShortPath() publicKey, err := keyselect.CosignPublicKeyForVersion(version) @@ -363,8 +363,8 @@ func (v *versionCollector) newMeasurements(ctx context.Context, csp cloudprovide continue } upgrades[shortPath] = measurements + v.log.Debug("Compatible image measurement found", shortPath, measurements.String()) } - v.log.Debug(fmt.Sprintf("Compatible image measurements are %v", upgrades)) return upgrades, nil } @@ -452,9 +452,9 @@ func (v *versionCollector) newImages(ctx context.Context, currentImageVersion co if err != nil { return nil, fmt.Errorf("calculating next image minor version: %w", err) } - v.log.Debug(fmt.Sprintf("Current image minor version is %s", currentImageMinorVer)) - v.log.Debug(fmt.Sprintf("Current CLI minor version is %s", currentCLIMinorVer)) - v.log.Debug(fmt.Sprintf("Next image minor version is %s", nextImageMinorVer)) + v.log.Debug(fmt.Sprintf("Current image minor version is %q", currentImageMinorVer)) + v.log.Debug(fmt.Sprintf("Current CLI minor version is %q", currentCLIMinorVer)) + v.log.Debug(fmt.Sprintf("Next image minor version is %q", nextImageMinorVer)) allowedMinorVersions := []string{currentImageMinorVer, nextImageMinorVer} switch cliImageCompare := semver.Compare(currentCLIMinorVer, currentImageMinorVer); { @@ -493,7 +493,7 @@ func (v *versionCollector) newerVersions(ctx context.Context, allowedVersions [] patchList, err := v.verListFetcher.FetchVersionList(ctx, patchList) var notFound *fetcher.NotFoundError if errors.As(err, ¬Found) { - v.log.Debug(fmt.Sprintf("Skipping version: %s", err)) + v.log.Debug(fmt.Sprintf("Skipping version: %q", err)) continue } if err != nil { @@ -603,7 +603,7 @@ func getCompatibleImageMeasurements(ctx context.Context, writer io.Writer, clien } var fetchedMeasurements measurements.M - log.Debug(fmt.Sprintf("Fetching for measurement url: %s", measurementsURL)) + log.Debug(fmt.Sprintf("Fetching for measurement url: %q", measurementsURL)) hash, err := fetchedMeasurements.FetchAndVerify( ctx, client, cosign, @@ -657,7 +657,7 @@ func (v *versionCollector) newCLIVersions(ctx context.Context) ([]consemver.Semv return nil, fmt.Errorf("parsing version %s: %w", version, err) } if err := 
target.IsUpgradeTo(v.cliVersion); err != nil { - v.log.Debug(fmt.Sprintf("Skipping incompatible minor version %q: %s", version, err)) + v.log.Debug(fmt.Sprintf("Skipping incompatible minor version %q: %q", version, err)) continue } list := versionsapi.List{ @@ -691,7 +691,7 @@ func (v *versionCollector) filterCompatibleCLIVersions(ctx context.Context, cliP var compatibleVersions []consemver.Semver for _, version := range cliPatchVersions { if err := version.IsUpgradeTo(v.cliVersion); err != nil { - v.log.Debug(fmt.Sprintf("Skipping incompatible patch version %q: %s", version, err)) + v.log.Debug(fmt.Sprintf("Skipping incompatible patch version %q: %q", version, err)) continue } req := versionsapi.CLIInfo{ diff --git a/cli/internal/cmd/verify.go b/cli/internal/cmd/verify.go index 049f02293..f80d1128d 100644 --- a/cli/internal/cmd/verify.go +++ b/cli/internal/cmd/verify.go @@ -128,7 +128,7 @@ func runVerify(cmd *cobra.Command, _ []string) error { if err := v.flags.parse(cmd.Flags()); err != nil { return err } - v.log.Debug(fmt.Sprintf("Using flags: %+v", v.flags)) + v.log.Debug("Using flags", "clusterID", v.flags.clusterID, "endpoint", v.flags.endpoint, "ownerID", v.flags.ownerID) fetcher := attestationconfigapi.NewFetcher() return v.verify(cmd, verifyClient, formatterFactory, fetcher) } @@ -175,7 +175,7 @@ func (c *verifyCmd) verify(cmd *cobra.Command, verifyClient verifyClient, factor return fmt.Errorf("updating expected PCRs: %w", err) } - c.log.Debug(fmt.Sprintf("Creating aTLS Validator for %s", conf.GetAttestationConfig().GetVariant())) + c.log.Debug(fmt.Sprintf("Creating aTLS Validator for %q", conf.GetAttestationConfig().GetVariant())) validator, err := choose.Validator(attConfig, warnLogger{cmd: cmd, log: c.log}) if err != nil { return fmt.Errorf("creating aTLS validator: %w", err) diff --git a/e2e/malicious-join/malicious-join.go b/e2e/malicious-join/malicious-join.go index 981035880..2ef649771 100644 --- a/e2e/malicious-join/malicious-join.go +++ b/e2e/malicious-join/malicious-join.go @@ -155,13 +155,13 @@ type maliciousJoiner struct { // join issues a join request to the join service endpoint. 
func (j *maliciousJoiner) join(ctx context.Context) (*joinproto.IssueJoinTicketResponse, error) { - j.logger.Debug(fmt.Sprintf("Dialing join service endpoint %s", j.endpoint)) + j.logger.Debug(fmt.Sprintf("Dialing join service endpoint %q", j.endpoint)) conn, err := j.dialer.Dial(ctx, j.endpoint) if err != nil { return nil, fmt.Errorf("dialing join service endpoint: %w", err) } defer conn.Close() - j.logger.Debug(fmt.Sprintf("Successfully dialed join service endpoint %s", j.endpoint)) + j.logger.Debug(fmt.Sprintf("Successfully dialed join service endpoint %q", j.endpoint)) protoClient := joinproto.NewAPIClient(conn) @@ -172,7 +172,7 @@ func (j *maliciousJoiner) join(ctx context.Context) (*joinproto.IssueJoinTicketR IsControlPlane: false, } res, err := protoClient.IssueJoinTicket(ctx, req) - j.logger.Debug(fmt.Sprintf("Got join ticket response: %+v", res)) + j.logger.Debug("Got join ticket response", "apiServerEndpoint", res.ApiServerEndpoint, "kubernetesVersion", res.KubernetesVersion) if err != nil { return nil, fmt.Errorf("issuing join ticket: %w", err) } diff --git a/hack/bazel-deps-mirror/check.go b/hack/bazel-deps-mirror/check.go index 64bd68b73..465e46f7f 100644 --- a/hack/bazel-deps-mirror/check.go +++ b/hack/bazel-deps-mirror/check.go @@ -40,7 +40,7 @@ func runCheck(cmd *cobra.Command, _ []string) error { return err } log := logger.NewTextLogger(flags.logLevel) - log.Debug(fmt.Sprintf("Parsed flags: %+v", flags)) + log.Debug("Using flags", "mirror", flags.mirror, "mirrorUnauthenticated", flags.mirrorUnauthenticated) filesHelper, err := bazelfiles.New() if err != nil { @@ -89,7 +89,7 @@ func runCheck(cmd *cobra.Command, _ []string) error { } func checkBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorCheck mirrorChecker, bazelFile bazelfiles.BazelFile, log *slog.Logger) (issByFile issues.ByFile, err error) { - log.Debug(fmt.Sprintf("Checking file: %s", bazelFile.RelPath)) + log.Debug(fmt.Sprintf("Checking file: %q", bazelFile.RelPath)) issByFile = issues.NewByFile() buildfile, err := fileHelper.LoadFile(bazelFile) if err != nil { @@ -97,12 +97,12 @@ func checkBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorCh } found := rules.Rules(buildfile, rules.SupportedRules) if len(found) == 0 { - log.Debug(fmt.Sprintf("No rules found in file: %s", bazelFile.RelPath)) + log.Debug(fmt.Sprintf("No rules found in file: %q", bazelFile.RelPath)) return issByFile, nil } - log.Debug(fmt.Sprintf("Found %d rules in file: %s", len(found), bazelFile.RelPath)) + log.Debug(fmt.Sprintf("Found %d rules in file: %q", len(found), bazelFile.RelPath)) for _, rule := range found { - log.Debug(fmt.Sprintf("Checking rule: %s", rule.Name())) + log.Debug(fmt.Sprintf("Checking rule: %q", rule.Name())) // check if the rule is a valid pinned dependency rule (has all required attributes) if issues := rules.ValidatePinned(rule); len(issues) > 0 { issByFile.Add(rule.Name(), issues...) 
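The convention this patch applies throughout is twofold: values interpolated into debug messages via fmt.Sprintf switch from %s/%v/%+v to %q so they appear quoted in the log output, and flag structs are no longer dumped wholesale with %+v but logged as explicit slog key-value pairs. A minimal, self-contained sketch of the before/after styles (names and values are illustrative only, not taken from this repository):

package main

import (
	"fmt"
	"log/slog"
	"os"
)

// exampleFlags is a stand-in for the per-command flag structs used in the patch.
type exampleFlags struct {
	endpoint string
	force    bool
}

func main() {
	log := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug}))
	flags := exampleFlags{endpoint: "192.0.2.1:9000", force: true}

	// Old style: unquoted interpolation and whole-struct dumps.
	log.Debug(fmt.Sprintf("Using endpoint %s", flags.endpoint))
	log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))

	// New style: %q quotes the interpolated value; flags become explicit key-value pairs.
	log.Debug(fmt.Sprintf("Using endpoint %q", flags.endpoint))
	log.Debug("Using flags", "endpoint", flags.endpoint, "force", flags.force)

	// %q also works for error values: fmt calls Error() and quotes the result,
	// which is why the patch can use it for err operands as well.
	err := fmt.Errorf("dial tcp: connection refused")
	log.Debug(fmt.Sprintf("Encountered error (retriable: %t): %q", true, err))
}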
diff --git a/hack/bazel-deps-mirror/fix.go b/hack/bazel-deps-mirror/fix.go index 9a327ee27..dd231cd06 100644 --- a/hack/bazel-deps-mirror/fix.go +++ b/hack/bazel-deps-mirror/fix.go @@ -40,7 +40,7 @@ func runFix(cmd *cobra.Command, _ []string) error { return err } log := logger.NewTextLogger(flags.logLevel) - log.Debug(fmt.Sprintf("Parsed flags: %+v", flags)) + log.Debug("Using flags", "unauthenticated", flags.unauthenticated, "dryRun", flags.dryRun) fileHelper, err := bazelfiles.New() if err != nil { @@ -96,10 +96,10 @@ func fixBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorUplo } found := rules.Rules(buildfile, rules.SupportedRules) if len(found) == 0 { - log.Debug(fmt.Sprintf("No rules found in file: %s", bazelFile.RelPath)) + log.Debug(fmt.Sprintf("No rules found in file: %q", bazelFile.RelPath)) return iss, nil } - log.Debug(fmt.Sprintf("Found %d rules in file: %s", len(found), bazelFile.RelPath)) + log.Debug(fmt.Sprintf("Found %d rules in file: %q", len(found), bazelFile.RelPath)) for _, rule := range found { changedRule, ruleIssues := fixRule(ctx, mirrorUpload, rule, log) if len(ruleIssues) > 0 { @@ -113,7 +113,7 @@ func fixBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorUplo return iss, nil } if !changed { - log.Debug(fmt.Sprintf("No changes to file: %s", bazelFile.RelPath)) + log.Debug(fmt.Sprintf("No changes to file: %q", bazelFile.RelPath)) return iss, nil } if dryRun { @@ -142,12 +142,12 @@ func learnHashForRule(ctx context.Context, mirrorUpload mirrorUploader, rule *bu return err } rules.SetHash(rule, learnedHash) - log.Debug(fmt.Sprintf("Learned hash for rule %s: %s", rule.Name(), learnedHash)) + log.Debug(fmt.Sprintf("Learned hash for rule %q: %q", rule.Name(), learnedHash)) return nil } func fixRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.Rule, log *slog.Logger) (changed bool, iss []error) { - log.Debug(fmt.Sprintf("Fixing rule: %s", rule.Name())) + log.Debug(fmt.Sprintf("Fixing rule: %q", rule.Name())) // try to learn the hash if hash, err := rules.GetHash(rule); err != nil || hash == "" { diff --git a/hack/bazel-deps-mirror/internal/mirror/mirror.go b/hack/bazel-deps-mirror/internal/mirror/mirror.go index 1593cc298..a9919adcc 100644 --- a/hack/bazel-deps-mirror/internal/mirror/mirror.go +++ b/hack/bazel-deps-mirror/internal/mirror/mirror.go @@ -95,10 +95,10 @@ func (m *Maintainer) Mirror(ctx context.Context, hash string, urls []string) err } for _, url := range urls { - m.log.Debug(fmt.Sprintf("Mirroring file with hash %v from %q", hash, url)) + m.log.Debug(fmt.Sprintf("Mirroring file with hash %q from %q", hash, url)) body, err := m.downloadFromUpstream(ctx, url) if err != nil { - m.log.Debug(fmt.Sprintf("Failed to download file from %q: %v", url, err)) + m.log.Debug(fmt.Sprintf("Failed to download file from %q: %q", url, err)) continue } defer body.Close() @@ -129,13 +129,13 @@ func (m *Maintainer) Learn(ctx context.Context, urls []string) (string, error) { m.log.Debug(fmt.Sprintf("Learning new hash from %q", url)) body, err := m.downloadFromUpstream(ctx, url) if err != nil { - m.log.Debug(fmt.Sprintf("Failed to download file from %q: %v", url, err)) + m.log.Debug(fmt.Sprintf("Failed to download file from %q: %q", url, err)) continue } defer body.Close() streamedHash := sha256.New() if _, err := io.Copy(streamedHash, body); err != nil { - m.log.Debug(fmt.Sprintf("Failed to stream file from %q: %v", url, err)) + m.log.Debug(fmt.Sprintf("Failed to stream file from %q: %q", url, err)) } learnedHash := 
hex.EncodeToString(streamedHash.Sum(nil)) m.log.Debug(fmt.Sprintf("File successfully downloaded from %q with %q", url, learnedHash)) @@ -146,7 +146,7 @@ func (m *Maintainer) Learn(ctx context.Context, urls []string) (string, error) { // Check checks if a file is present and has the correct hash in the CAS mirror. func (m *Maintainer) Check(ctx context.Context, expectedHash string) error { - m.log.Debug(fmt.Sprintf("Checking consistency of object with hash %v", expectedHash)) + m.log.Debug(fmt.Sprintf("Checking consistency of object with hash %q", expectedHash)) if m.unauthenticated { return m.checkUnauthenticated(ctx, expectedHash) } @@ -157,7 +157,7 @@ func (m *Maintainer) Check(ctx context.Context, expectedHash string) error { // It uses the authenticated CAS s3 endpoint to download the file metadata. func (m *Maintainer) checkAuthenticated(ctx context.Context, expectedHash string) error { key := path.Join(keyBase, expectedHash) - m.log.Debug(fmt.Sprintf("Check: s3 getObjectAttributes {Bucket: %v, Key: %v}", m.bucket, key)) + m.log.Debug(fmt.Sprintf("Check: s3 getObjectAttributes {Bucket: %q, Key: %q}", m.bucket, key)) attributes, err := m.objectStorageClient.GetObjectAttributes(ctx, &s3.GetObjectAttributesInput{ Bucket: &m.bucket, Key: &key, @@ -174,7 +174,7 @@ func (m *Maintainer) checkAuthenticated(ctx context.Context, expectedHash string // checksums are not guaranteed to be present // and if present, they are only meaningful for single part objects // fallback if checksum cannot be verified from attributes - m.log.Debug(fmt.Sprintf("S3 object attributes cannot be used to verify key %v. Falling back to download.", key)) + m.log.Debug(fmt.Sprintf("S3 object attributes cannot be used to verify key %q. Falling back to download.", key)) return m.checkUnauthenticated(ctx, expectedHash) } @@ -192,7 +192,7 @@ func (m *Maintainer) checkUnauthenticated(ctx context.Context, expectedHash stri if err != nil { return err } - m.log.Debug(fmt.Sprintf("Check: http get {Url: %v}", pubURL)) + m.log.Debug(fmt.Sprintf("Check: http get {Url: %q}", pubURL)) req, err := http.NewRequestWithContext(ctx, http.MethodGet, pubURL, http.NoBody) if err != nil { return err @@ -221,10 +221,10 @@ func (m *Maintainer) put(ctx context.Context, hash string, data io.Reader) error key := path.Join(keyBase, hash) if m.dryRun { - m.log.Debug(fmt.Sprintf("DryRun: s3 put object {Bucket: %v, Key: %v}", m.bucket, key)) + m.log.Debug(fmt.Sprintf("DryRun: s3 put object {Bucket: %q, Key: %q}", m.bucket, key)) return nil } - m.log.Debug(fmt.Sprintf("Uploading object with hash %v to s3://%v/%v", hash, m.bucket, key)) + m.log.Debug(fmt.Sprintf("Uploading object with hash %q to \"s3://%s/%s\"", hash, m.bucket, key)) _, err := m.uploadClient.Upload(ctx, &s3.PutObjectInput{ Bucket: &m.bucket, Key: &key, diff --git a/hack/bazel-deps-mirror/upgrade.go b/hack/bazel-deps-mirror/upgrade.go index e2c07d5c2..8729f0aea 100644 --- a/hack/bazel-deps-mirror/upgrade.go +++ b/hack/bazel-deps-mirror/upgrade.go @@ -40,7 +40,7 @@ func runUpgrade(cmd *cobra.Command, _ []string) error { return err } log := logger.NewTextLogger(flags.logLevel) - log.Debug(fmt.Sprintf("Parsed flags: %+v", flags)) + log.Debug("Using flags", "unauthenticated", flags.unauthenticated, "dryRun", flags.dryRun) fileHelper, err := bazelfiles.New() if err != nil { @@ -96,10 +96,10 @@ func upgradeBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirror } found := rules.Rules(buildfile, rules.SupportedRules) if len(found) == 0 { - log.Debug(fmt.Sprintf("No rules found in 
file: %s", bazelFile.RelPath)) + log.Debug(fmt.Sprintf("No rules found in file: %q", bazelFile.RelPath)) return iss, nil } - log.Debug(fmt.Sprintf("Found %d rules in file: %s", len(found), bazelFile.RelPath)) + log.Debug(fmt.Sprintf("Found %d rules in file: %q", len(found), bazelFile.RelPath)) for _, rule := range found { changedRule, ruleIssues := upgradeRule(ctx, mirrorUpload, rule, log) if len(ruleIssues) > 0 { @@ -113,7 +113,7 @@ func upgradeBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirror return iss, nil } if !changed { - log.Debug(fmt.Sprintf("No changes to file: %s", bazelFile.RelPath)) + log.Debug(fmt.Sprintf("No changes to file: %q", bazelFile.RelPath)) return iss, nil } if dryRun { @@ -133,7 +133,7 @@ func upgradeBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirror } func upgradeRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.Rule, log *slog.Logger) (changed bool, iss []error) { - log.Debug(fmt.Sprintf("Upgrading rule: %s", rule.Name())) + log.Debug(fmt.Sprintf("Upgrading rule: %q", rule.Name())) upstreamURLs, err := rules.UpstreamURLs(rule) if errors.Is(err, rules.ErrNoUpstreamURL) { diff --git a/hack/oci-pin/codegen.go b/hack/oci-pin/codegen.go index 910056ed0..774b794da 100644 --- a/hack/oci-pin/codegen.go +++ b/hack/oci-pin/codegen.go @@ -45,14 +45,14 @@ func runCodegen(cmd *cobra.Command, _ []string) error { return err } log := logger.NewTextLogger(flags.logLevel) - log.Debug(fmt.Sprintf("Parsed flags: %+v", flags)) + log.Debug("Using flags", "identifier", flags.identifier, "imageRepoTag", flags.imageRepoTag, "ociPath", flags.ociPath, "pkg", flags.pkg) registry, prefix, name, tag, err := splitRepoTag(flags.imageRepoTag) if err != nil { return fmt.Errorf("splitting OCI image reference %q: %w", flags.imageRepoTag, err) } - log.Debug(fmt.Sprintf("Generating Go code for OCI image %s.", name)) + log.Debug(fmt.Sprintf("Generating Go code for OCI image %q.", name)) ociIndexPath := filepath.Join(flags.ociPath, "index.json") index, err := os.Open(ociIndexPath) @@ -78,7 +78,7 @@ func runCodegen(cmd *cobra.Command, _ []string) error { return err } - log.Debug(fmt.Sprintf("OCI image digest: %s", digest)) + log.Debug(fmt.Sprintf("OCI image digest: %q", digest)) if err := inject.Render(out, inject.PinningValues{ Package: flags.pkg, diff --git a/hack/oci-pin/merge.go b/hack/oci-pin/merge.go index 94bafd52b..565d08a35 100644 --- a/hack/oci-pin/merge.go +++ b/hack/oci-pin/merge.go @@ -36,7 +36,7 @@ func runMerge(cmd *cobra.Command, _ []string) error { return err } log := logger.NewTextLogger(flags.logLevel) - log.Debug(fmt.Sprintf("Parsed flags: %+v", flags)) + log.Debug("Using flags", "inputs", flags.inputs, "output", flags.output, "logLevel", flags.logLevel) log.Debug(fmt.Sprintf("Merging sum file from %q into %q.", flags.inputs, flags.output)) diff --git a/hack/oci-pin/sum.go b/hack/oci-pin/sum.go index d6be5154a..feacd8ca1 100644 --- a/hack/oci-pin/sum.go +++ b/hack/oci-pin/sum.go @@ -42,14 +42,14 @@ func runSum(cmd *cobra.Command, _ []string) error { return err } log := logger.NewTextLogger(flags.logLevel) - log.Debug(fmt.Sprintf("Parsed flags: %+v", flags)) + log.Debug("Using flags", "imageRepoTag", flags.imageRepoTag, "ociPath", flags.ociPath) registry, prefix, name, tag, err := splitRepoTag(flags.imageRepoTag) if err != nil { return fmt.Errorf("splitting repo tag: %w", err) } - log.Debug(fmt.Sprintf("Generating sum file for OCI image %s.", name)) + log.Debug(fmt.Sprintf("Generating sum file for OCI image %q.", name)) ociIndexPath 
:= filepath.Join(flags.ociPath, "index.json") index, err := os.Open(ociIndexPath) @@ -75,7 +75,7 @@ func runSum(cmd *cobra.Command, _ []string) error { return fmt.Errorf("extracting OCI image digest: %w", err) } - log.Debug(fmt.Sprintf("OCI image digest: %s", digest)) + log.Debug(fmt.Sprintf("OCI image digest: %q", digest)) refs := []sums.PinnedImageReference{ { diff --git a/image/upload/internal/cmd/info.go b/image/upload/internal/cmd/info.go index cd629600e..b68db4929 100644 --- a/image/upload/internal/cmd/info.go +++ b/image/upload/internal/cmd/info.go @@ -50,7 +50,7 @@ func runInfo(cmd *cobra.Command, args []string) error { } log := logger.NewTextLogger(flags.logLevel) - log.Debug(fmt.Sprintf("Parsed flags: %+v", flags)) + log.Debug("Using flags", "region", flags.region, "bucket", flags.bucket, "distributionID", flags.distributionID) info, err := readInfoArgs(args) if err != nil { return err diff --git a/image/upload/internal/cmd/measurementsenvelope.go b/image/upload/internal/cmd/measurementsenvelope.go index 878dcaf71..70c16a24e 100644 --- a/image/upload/internal/cmd/measurementsenvelope.go +++ b/image/upload/internal/cmd/measurementsenvelope.go @@ -54,7 +54,7 @@ func runEnvelopeMeasurements(cmd *cobra.Command, _ []string) error { } log := logger.NewTextLogger(flags.logLevel) - log.Debug(fmt.Sprintf("Parsed flags: %+v", flags)) + log.Debug("Using flags", "version", flags.version.Version(), "csp", flags.csp, "attestationVariant", flags.attestationVariant, "in", flags.in) f, err := os.Open(flags.in) if err != nil { diff --git a/image/upload/internal/cmd/measurementsmerge.go b/image/upload/internal/cmd/measurementsmerge.go index 53ec2de2c..78b283850 100644 --- a/image/upload/internal/cmd/measurementsmerge.go +++ b/image/upload/internal/cmd/measurementsmerge.go @@ -45,7 +45,7 @@ func runMergeMeasurements(cmd *cobra.Command, args []string) error { } log := logger.NewTextLogger(flags.logLevel) - log.Debug(fmt.Sprintf("Parsed flags: %+v", flags)) + log.Debug("Using flags", "out", flags.out, "logLevel", flags.logLevel) mergedMeasurements, err := readMeasurementsArgs(args) if err != nil { diff --git a/image/upload/internal/cmd/measurementsupload.go b/image/upload/internal/cmd/measurementsupload.go index 850883c63..720864c78 100644 --- a/image/upload/internal/cmd/measurementsupload.go +++ b/image/upload/internal/cmd/measurementsupload.go @@ -53,7 +53,7 @@ func runMeasurementsUpload(cmd *cobra.Command, _ []string) error { } log := logger.NewTextLogger(flags.logLevel) - log.Debug(fmt.Sprintf("Parsed flags: %+v", flags)) + log.Debug("Using flags", "measurementsPath", flags.measurementsPath, "signaturePath", flags.signaturePath, "region", flags.region, "bucket", flags.bucket, "distributionID", flags.distributionID) uploadC, uploadCClose, err := measurementsuploader.New(cmd.Context(), flags.region, flags.bucket, flags.distributionID, log) if err != nil { diff --git a/image/upload/internal/cmd/uplosi.go b/image/upload/internal/cmd/uplosi.go index 13a854683..ddfec8d70 100644 --- a/image/upload/internal/cmd/uplosi.go +++ b/image/upload/internal/cmd/uplosi.go @@ -59,8 +59,8 @@ func runUplosi(cmd *cobra.Command, _ []string) error { return err } log := logger.NewTextLogger(flags.logLevel) - log.Debug(fmt.Sprintf("Parsed flags: %+v", flags)) - + log.Debug("Using flags", "raw-image", flags.rawImage, "attestation-variant", flags.attestationVariant, "csp", flags.provider, "ref", flags.version.Ref(), "stream", flags.version.Stream(), + "version", flags.version.Version(), "region", flags.region, "bucket", 
flags.bucket, "distribution-id", flags.distributionID, "out", flags.out, "uplosi-path", flags.uplosiPath) archiveC, archiveCClose, err := archive.New(cmd.Context(), flags.region, flags.bucket, flags.distributionID, log) if err != nil { return err diff --git a/internal/api/client/client.go b/internal/api/client/client.go index d9ad7ec9e..4929872ed 100644 --- a/internal/api/client/client.go +++ b/internal/api/client/client.go @@ -131,7 +131,7 @@ func (c *Client) DeletePath(ctx context.Context, path string) error { Bucket: &c.bucket, Prefix: &path, } - c.Logger.Debug(fmt.Sprintf("Listing objects in %s", path)) + c.Logger.Debug(fmt.Sprintf("Listing objects in %q", path)) objs := []s3types.Object{} out := &s3.ListObjectsV2Output{IsTruncated: ptr(true)} for out.IsTruncated != nil && *out.IsTruncated { @@ -142,7 +142,7 @@ func (c *Client) DeletePath(ctx context.Context, path string) error { } objs = append(objs, out.Contents...) } - c.Logger.Debug(fmt.Sprintf("Found %d objects in %s", len(objs), path)) + c.Logger.Debug(fmt.Sprintf("Found %d objects in %q", len(objs), path)) if len(objs) == 0 { c.Logger.Warn(fmt.Sprintf("Path %s is already empty", path)) @@ -167,7 +167,7 @@ func (c *Client) DeletePath(ctx context.Context, path string) error { Objects: objIDs, }, } - c.Logger.Debug(fmt.Sprintf("Deleting %d objects in %s", len(objs), path)) + c.Logger.Debug(fmt.Sprintf("Deleting %d objects in %q", len(objs), path)) if _, err := c.s3Client.DeleteObjects(ctx, deleteIn); err != nil { return fmt.Errorf("deleting objects in %s: %w", path, err) } @@ -197,7 +197,7 @@ func Fetch[T APIObject](ctx context.Context, c *Client, obj T) (T, error) { Key: ptr(obj.JSONPath()), } - c.Logger.Debug(fmt.Sprintf("Fetching %T from s3: %s", obj, obj.JSONPath())) + c.Logger.Debug(fmt.Sprintf("Fetching %T from s3: %q", obj, obj.JSONPath())) out, err := c.s3Client.GetObject(ctx, in) var noSuchkey *s3types.NoSuchKey if errors.As(err, &noSuchkey) { @@ -243,7 +243,7 @@ func Update(ctx context.Context, c *Client, obj APIObject) error { c.dirtyPaths = append(c.dirtyPaths, "/"+obj.JSONPath()) - c.Logger.Debug(fmt.Sprintf("Uploading %T to s3: %v", obj, obj.JSONPath())) + c.Logger.Debug(fmt.Sprintf("Uploading %T to s3: %q", obj, obj.JSONPath())) if _, err := c.Upload(ctx, in); err != nil { return fmt.Errorf("uploading %T: %w", obj, err) } @@ -306,7 +306,7 @@ func Delete(ctx context.Context, c *Client, obj APIObject) error { Key: ptr(obj.JSONPath()), } - c.Logger.Debug(fmt.Sprintf("Deleting %T from s3: %s", obj, obj.JSONPath())) + c.Logger.Debug(fmt.Sprintf("Deleting %T from s3: %q", obj, obj.JSONPath())) if _, err := c.DeleteObject(ctx, in); err != nil { return fmt.Errorf("deleting s3 object at %s: %w", obj.JSONPath(), err) } diff --git a/internal/api/versionsapi/cli/add.go b/internal/api/versionsapi/cli/add.go index 89c64c2b7..f1a6fc4fd 100644 --- a/internal/api/versionsapi/cli/add.go +++ b/internal/api/versionsapi/cli/add.go @@ -53,7 +53,8 @@ func runAdd(cmd *cobra.Command, _ []string) (retErr error) { return err } log := logger.NewTextLogger(flags.logLevel) - log.Debug(fmt.Sprintf("Parsed flags: %+v", flags)) + log.Debug("Using flags", "dryRun", flags.dryRun, "kind", flags.kind, "latest", flags.latest, "ref", flags.ref, + "release", flags.release, "stream", flags.stream, "version", flags.version) log.Debug("Validating flags") if err := flags.validate(log); err != nil { @@ -117,7 +118,7 @@ func ensureVersion(ctx context.Context, client *versionsapi.Client, kind version } else if err != nil { return fmt.Errorf("failed to list minor 
versions: %w", err) } - log.Debug(fmt.Sprintf("%s version list: %v", gran.String(), verList)) + log.Debug(fmt.Sprintf("%q version list: %v", gran.String(), verList.Versions)) insertGran := gran + 1 insertVersion := ver.WithGranularity(insertGran) @@ -129,7 +130,7 @@ func ensureVersion(ctx context.Context, client *versionsapi.Client, kind version log.Info(fmt.Sprintf("Inserting %s version %q into list", insertGran.String(), insertVersion)) verList.Versions = append(verList.Versions, insertVersion) - log.Debug(fmt.Sprintf("New %s version list: %v", gran.String(), verList)) + log.Debug(fmt.Sprintf("New %q version list: %v", gran.String(), verList.Versions)) if err := client.UpdateVersionList(ctx, verList); err != nil { return fmt.Errorf("failed to add %s version: %w", gran.String(), err) diff --git a/internal/api/versionsapi/cli/latest.go b/internal/api/versionsapi/cli/latest.go index 797cfc64d..6b3c3983f 100644 --- a/internal/api/versionsapi/cli/latest.go +++ b/internal/api/versionsapi/cli/latest.go @@ -39,7 +39,7 @@ func runLatest(cmd *cobra.Command, _ []string) (retErr error) { return err } log := logger.NewTextLogger(flags.logLevel) - log.Debug(fmt.Sprintf("Parsed flags: %+v", flags)) + log.Debug("Using flags", "ref", flags.ref, "stream", flags.stream, "json", flags.json) log.Debug("Validating flags") if err := flags.validate(); err != nil { diff --git a/internal/api/versionsapi/cli/list.go b/internal/api/versionsapi/cli/list.go index 717ba6c77..1aa6d88c6 100644 --- a/internal/api/versionsapi/cli/list.go +++ b/internal/api/versionsapi/cli/list.go @@ -44,7 +44,8 @@ func runList(cmd *cobra.Command, _ []string) (retErr error) { return err } log := logger.NewTextLogger(flags.logLevel) - log.Debug(fmt.Sprintf("Parsed flags: %+v", flags)) + log.Debug("Using flags", "bucket", flags.bucket, "distributionID", flags.distributionID, "json", flags.json, "minorVersion", flags.minorVersion, + "ref", flags.ref, "region", flags.region, "stream", flags.stream) log.Debug("Validating flags") if err := flags.validate(); err != nil { diff --git a/internal/api/versionsapi/cli/rm.go b/internal/api/versionsapi/cli/rm.go index 51802b5fb..b5e90bb58 100644 --- a/internal/api/versionsapi/cli/rm.go +++ b/internal/api/versionsapi/cli/rm.go @@ -75,7 +75,9 @@ func runRemove(cmd *cobra.Command, _ []string) (retErr error) { return err } log := logger.NewTextLogger(flags.logLevel) - log.Debug(fmt.Sprintf("Parsed flags: %+v", flags)) + log.Debug("Using flags", "all", flags.all, "azLocation", flags.azLocation, "azResourceGroup", flags.azResourceGroup, "azSubscription", flags.azSubscription, + "bucket", flags.bucket, "distributionID", flags.distributionID, "dryrun", flags.dryrun, "gcpProject", flags.gcpProject, "ref", flags.ref, + "region", flags.region, "stream", flags.stream, "version", flags.version, "versionPath", flags.versionPath) log.Debug("Validating flags") if err := flags.validate(); err != nil { @@ -138,12 +140,12 @@ func runRemove(cmd *cobra.Command, _ []string) (retErr error) { func deleteSingleVersion(ctx context.Context, clients rmImageClients, ver versionsapi.Version, dryrun bool, log *slog.Logger) error { var retErr error - log.Debug(fmt.Sprintf("Deleting images for %s", ver.Version())) + log.Debug(fmt.Sprintf("Deleting images for %q", ver.Version())) if err := deleteImage(ctx, clients, ver, dryrun, log); err != nil { retErr = errors.Join(retErr, fmt.Errorf("deleting images: %w", err)) } - log.Debug(fmt.Sprintf("Deleting version %s from versions API", ver.Version())) + log.Debug(fmt.Sprintf("Deleting version 
%q from versions API", ver.Version())) if err := clients.version.DeleteVersion(ctx, ver); err != nil { retErr = errors.Join(retErr, fmt.Errorf("deleting version from versions API: %w", err)) } @@ -159,7 +161,7 @@ func deleteRef(ctx context.Context, clients rmImageClients, ref string, dryrun b minorVersions, err := listMinorVersions(ctx, clients.version, ref, stream) var notFoundErr *apiclient.NotFoundError if errors.As(err, &notFoundErr) { - log.Debug(fmt.Sprintf("No minor versions found for stream %s", stream)) + log.Debug(fmt.Sprintf("No minor versions found for stream %q", stream)) continue } else if err != nil { return fmt.Errorf("listing minor versions for stream %s: %w", stream, err) @@ -167,7 +169,7 @@ func deleteRef(ctx context.Context, clients rmImageClients, ref string, dryrun b patchVersions, err := listPatchVersions(ctx, clients.version, ref, stream, minorVersions) if errors.As(err, &notFoundErr) { - log.Debug(fmt.Sprintf("No patch versions found for stream %s", stream)) + log.Debug(fmt.Sprintf("No patch versions found for stream %q", stream)) continue } else if err != nil { return fmt.Errorf("listing patch versions for stream %s: %w", stream, err) @@ -406,7 +408,7 @@ func (a *awsClient) deleteImage(ctx context.Context, ami string, region string, return err } a.ec2 = ec2.NewFromConfig(cfg) - log.Debug(fmt.Sprintf("Deleting resources in AWS region %s", region)) + log.Debug(fmt.Sprintf("Deleting resources in AWS region %q", region)) snapshotID, err := a.getSnapshotID(ctx, ami, log) if err != nil { @@ -427,7 +429,7 @@ func (a *awsClient) deleteImage(ctx context.Context, ami string, region string, } func (a *awsClient) deregisterImage(ctx context.Context, ami string, dryrun bool, log *slog.Logger) error { - log.Debug(fmt.Sprintf("Deregistering image %s", ami)) + log.Debug(fmt.Sprintf("Deregistering image %q", ami)) deregisterReq := ec2.DeregisterImageInput{ ImageId: &ami, @@ -446,7 +448,7 @@ func (a *awsClient) deregisterImage(ctx context.Context, ami string, dryrun bool } func (a *awsClient) getSnapshotID(ctx context.Context, ami string, log *slog.Logger) (string, error) { - log.Debug(fmt.Sprintf("Describing image %s", ami)) + log.Debug(fmt.Sprintf("Describing image %q", ami)) req := ec2.DescribeImagesInput{ ImageIds: []string{ami}, @@ -482,7 +484,7 @@ func (a *awsClient) getSnapshotID(ctx context.Context, ami string, log *slog.Log } func (a *awsClient) deleteSnapshot(ctx context.Context, snapshotID string, dryrun bool, log *slog.Logger) error { - log.Debug(fmt.Sprintf("Deleting AWS snapshot %s", snapshotID)) + log.Debug(fmt.Sprintf("Deleting AWS snapshot %q", snapshotID)) req := ec2.DeleteSnapshotInput{ SnapshotId: &snapshotID, @@ -536,11 +538,11 @@ func (g *gcpClient) deleteImage(ctx context.Context, imageURI string, dryrun boo } if dryrun { - log.Debug(fmt.Sprintf("DryRun: delete image request: %v", req)) + log.Debug(fmt.Sprintf("DryRun: delete image request: %q", req.String())) return nil } - log.Debug(fmt.Sprintf("Deleting image %s", image)) + log.Debug(fmt.Sprintf("Deleting image %q", image)) op, err := g.compute.Delete(ctx, req) if err != nil && strings.Contains(err.Error(), "404") { log.Warn(fmt.Sprintf("GCP image %s not found", image)) @@ -631,7 +633,7 @@ func (a *azureClient) deleteImage(ctx context.Context, image string, dryrun bool } if dryrun { - log.Debug(fmt.Sprintf("DryRun: delete image %v", azImage)) + log.Debug(fmt.Sprintf("DryRun: delete image: gallery: %q, image definition: %q, resource group: %q, version: %q", azImage.gallery, azImage.imageDefinition, 
azImage.resourceGroup, azImage.version)) return nil } @@ -663,7 +665,7 @@ func (a *azureClient) deleteImage(ctx context.Context, image string, dryrun bool time.Sleep(15 * time.Second) // Azure needs time understand that there is no version left... - log.Debug(fmt.Sprintf("Deleting image definition %s", azImage.imageDefinition)) + log.Debug(fmt.Sprintf("Deleting image definition %q", azImage.imageDefinition)) op, err := a.image.BeginDelete(ctx, azImage.resourceGroup, azImage.gallery, azImage.imageDefinition, nil) if err != nil { return fmt.Errorf("deleting image definition %s: %w", azImage.imageDefinition, err) @@ -687,7 +689,7 @@ type azImage struct { func (a *azureClient) parseImage(ctx context.Context, image string, log *slog.Logger) (azImage, error) { if m := azImageRegex.FindStringSubmatch(image); len(m) == 5 { log.Debug(fmt.Sprintf( - "Image matches local image format, resource group: %s, gallery: %s, image definition: %s, version: %s", + "Image matches local image format, resource group: %q, gallery: %q, image definition: %q, version: %q", m[1], m[2], m[3], m[4], )) return azImage{ @@ -708,7 +710,7 @@ func (a *azureClient) parseImage(ctx context.Context, image string, log *slog.Lo version := m[3] log.Debug(fmt.Sprintf( - "Image matches community image format, gallery public name: %s, image definition: %s, version: %s", + "Image matches community image format, gallery public name: %q, image definition: %q, version: %q", galleryPublicName, imageDefinition, version, )) @@ -725,15 +727,15 @@ func (a *azureClient) parseImage(ctx context.Context, image string, log *slog.Lo continue } if v.Properties.SharingProfile == nil { - log.Debug(fmt.Sprintf("Skipping gallery %s with nil sharing profile", *v.Name)) + log.Debug(fmt.Sprintf("Skipping gallery %q with nil sharing profile", *v.Name)) continue } if v.Properties.SharingProfile.CommunityGalleryInfo == nil { - log.Debug(fmt.Sprintf("Skipping gallery %s with nil community gallery info", *v.Name)) + log.Debug(fmt.Sprintf("Skipping gallery %q with nil community gallery info", *v.Name)) continue } if v.Properties.SharingProfile.CommunityGalleryInfo.PublicNames == nil { - log.Debug(fmt.Sprintf("Skipping gallery %s with nil public names", *v.Name)) + log.Debug(fmt.Sprintf("Skipping gallery %q with nil public names", *v.Name)) continue } for _, publicName := range v.Properties.SharingProfile.CommunityGalleryInfo.PublicNames { diff --git a/internal/api/versionsapi/client.go b/internal/api/versionsapi/client.go index c03e8a7b6..5d14fdacd 100644 --- a/internal/api/versionsapi/client.go +++ b/internal/api/versionsapi/client.go @@ -131,18 +131,18 @@ func (c *Client) DeleteRef(ctx context.Context, ref string) error { func (c *Client) DeleteVersion(ctx context.Context, ver Version) error { var retErr error - c.Client.Logger.Debug(fmt.Sprintf("Deleting version %s from minor version list", ver.version)) + c.Client.Logger.Debug(fmt.Sprintf("Deleting version %q from minor version list", ver.version)) possibleNewLatest, err := c.deleteVersionFromMinorVersionList(ctx, ver) if err != nil { retErr = errors.Join(retErr, fmt.Errorf("removing from minor version list: %w", err)) } - c.Client.Logger.Debug(fmt.Sprintf("Checking latest version for %s", ver.version)) + c.Client.Logger.Debug(fmt.Sprintf("Checking latest version for %q", ver.version)) if err := c.deleteVersionFromLatest(ctx, ver, possibleNewLatest); err != nil { retErr = errors.Join(retErr, fmt.Errorf("updating latest version: %w", err)) } - c.Client.Logger.Debug(fmt.Sprintf("Deleting artifact path %s for 
%s", ver.ArtifactPath(APIV1), ver.version)) + c.Client.Logger.Debug(fmt.Sprintf("Deleting artifact path %q for %q", ver.ArtifactPath(APIV1), ver.version)) if err := c.Client.DeletePath(ctx, ver.ArtifactPath(APIV1)); err != nil { retErr = errors.Join(retErr, fmt.Errorf("deleting artifact path: %w", err)) } @@ -159,7 +159,7 @@ func (c *Client) deleteVersionFromMinorVersionList(ctx context.Context, ver Vers Base: ver.WithGranularity(GranularityMinor), Kind: VersionKindImage, } - c.Client.Logger.Debug(fmt.Sprintf("Fetching minor version list for version %s", ver.version)) + c.Client.Logger.Debug(fmt.Sprintf("Fetching minor version list for version %q", ver.version)) minorList, err := c.FetchVersionList(ctx, minorList) var notFoundErr *apiclient.NotFoundError if errors.As(err, ¬FoundErr) { @@ -196,16 +196,16 @@ func (c *Client) deleteVersionFromMinorVersionList(ctx context.Context, ver Vers } if c.Client.DryRun { - c.Client.Logger.Debug(fmt.Sprintf("DryRun: Updating minor version list %s to %v", minorList.JSONPath(), minorList)) + c.Client.Logger.Debug(fmt.Sprintf("DryRun: Updating minor version list %q to %v", minorList.JSONPath(), minorList)) return latest, nil } - c.Client.Logger.Debug(fmt.Sprintf("Updating minor version list %s", minorList.JSONPath())) + c.Client.Logger.Debug(fmt.Sprintf("Updating minor version list %q", minorList.JSONPath())) if err := c.UpdateVersionList(ctx, minorList); err != nil { return latest, fmt.Errorf("updating minor version list %s: %w", minorList.JSONPath(), err) } - c.Client.Logger.Debug(fmt.Sprintf("Removed version %s from minor version list %s", ver.version, minorList.JSONPath())) + c.Client.Logger.Debug(fmt.Sprintf("Removed version %q from minor version list %q", ver.version, minorList.JSONPath())) return latest, nil } @@ -216,7 +216,7 @@ func (c *Client) deleteVersionFromLatest(ctx context.Context, ver Version, possi Stream: ver.stream, Kind: VersionKindImage, } - c.Client.Logger.Debug(fmt.Sprintf("Fetching latest version from %s", latest.JSONPath())) + c.Client.Logger.Debug(fmt.Sprintf("Fetching latest version from %q", latest.JSONPath())) latest, err := c.FetchVersionLatest(ctx, latest) var notFoundErr *apiclient.NotFoundError if errors.As(err, ¬FoundErr) { @@ -227,7 +227,7 @@ func (c *Client) deleteVersionFromLatest(ctx context.Context, ver Version, possi } if latest.Version != ver.version { - c.Client.Logger.Debug(fmt.Sprintf("Latest version is %s, not the deleted version %s", latest.Version, ver.version)) + c.Client.Logger.Debug(fmt.Sprintf("Latest version is %q, not the deleted version %q", latest.Version, ver.version)) return nil } @@ -238,7 +238,7 @@ func (c *Client) deleteVersionFromLatest(ctx context.Context, ver Version, possi } if c.Client.DryRun { - c.Client.Logger.Debug(fmt.Sprintf("Would update latest version from %s to %s", latest.Version, possibleNewLatest.Version)) + c.Client.Logger.Debug(fmt.Sprintf("Would update latest version from %q to %q", latest.Version, possibleNewLatest.Version)) return nil } diff --git a/internal/attestation/measurements/measurements.go b/internal/attestation/measurements/measurements.go index 62172dd26..a702706bd 100644 --- a/internal/attestation/measurements/measurements.go +++ b/internal/attestation/measurements/measurements.go @@ -26,6 +26,7 @@ import ( "net/url" "sort" "strconv" + "strings" "github.com/edgelesssys/constellation/v2/internal/attestation/variant" "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider" @@ -330,6 +331,15 @@ func (m *M) UnmarshalYAML(unmarshal func(any) error) error 
{ return nil } +// String returns a string representation of the measurements. +func (m M) String() string { + var returnString string + for i, measurement := range m { + returnString = strings.Join([]string{returnString, fmt.Sprintf("%d: 0x%s", i, hex.EncodeToString(measurement.Expected))}, ",") + } + return returnString +} + func (m *M) fromImageMeasurementsV2( measurements ImageMeasurementsV2, wantVersion versionsapi.Version, csp cloudprovider.Provider, attestationVariant variant.Variant, diff --git a/internal/constellation/apply.go b/internal/constellation/apply.go index bbd61cf8c..c9844b435 100644 --- a/internal/constellation/apply.go +++ b/internal/constellation/apply.go @@ -87,7 +87,7 @@ func (a *Applier) SetKubeConfig(kubeConfig []byte) error { // CheckLicense checks the given Constellation license with the license server // and returns the allowed quota for the license. func (a *Applier) CheckLicense(ctx context.Context, csp cloudprovider.Provider, initRequest bool, licenseID string) (int, error) { - a.log.Debug(fmt.Sprintf("Contacting license server for license '%s'", licenseID)) + a.log.Debug(fmt.Sprintf("Contacting license server for license %q", licenseID)) var action license.Action if initRequest { @@ -103,7 +103,7 @@ func (a *Applier) CheckLicense(ctx context.Context, csp cloudprovider.Provider, if err != nil { return 0, fmt.Errorf("checking license: %w", err) } - a.log.Debug(fmt.Sprintf("Got response from license server for license '%s'", licenseID)) + a.log.Debug(fmt.Sprintf("Got response from license server for license %q", licenseID)) return quota, nil } diff --git a/internal/constellation/applyinit.go b/internal/constellation/applyinit.go index f02c9e8cc..e451e4fd8 100644 --- a/internal/constellation/applyinit.go +++ b/internal/constellation/applyinit.go @@ -85,12 +85,12 @@ func (a *Applier) Init( // Create a wrapper function that allows logging any returned error from the retrier before checking if it's the expected retriable one. serviceIsUnavailable := func(err error) bool { isServiceUnavailable := grpcRetry.ServiceIsUnavailable(err) - a.log.Debug(fmt.Sprintf("Encountered error (retriable: %t): %s", isServiceUnavailable, err)) + a.log.Debug(fmt.Sprintf("Encountered error (retriable: %t): %q", isServiceUnavailable, err)) return isServiceUnavailable } // Perform the RPC - a.log.Debug(fmt.Sprintf("Making initialization call, doer is %+v", doer)) + a.log.Debug("Initialization call", "endpoint", doer.endpoint, "kmsURI", doer.req.KmsUri, "storageURI", doer.req.StorageUri) a.spinner.Start("Connecting ", false) retrier := retry.NewIntervalRetrier(doer, 30*time.Second, serviceIsUnavailable) if err := retrier.Do(ctx); err != nil { @@ -99,7 +99,7 @@ func (a *Applier) Init( a.spinner.Stop() a.log.Debug("Initialization request finished") - a.log.Debug(fmt.Sprintf("Rewriting cluster server address in kubeconfig to %s", state.Infrastructure.ClusterEndpoint)) + a.log.Debug(fmt.Sprintf("Rewriting cluster server address in kubeconfig to %q", state.Infrastructure.ClusterEndpoint)) kubeconfig, err := clientcmd.Load(doer.resp.Kubeconfig) if err != nil { return InitOutput{}, fmt.Errorf("loading kubeconfig: %w", err) @@ -175,7 +175,7 @@ func (d *initDoer) Do(ctx context.Context) error { conn, err := d.dialer.Dial(ctx, d.endpoint) if err != nil { - d.log.Debug(fmt.Sprintf("Dialing init server failed: %s. Retrying...", err)) + d.log.Debug(fmt.Sprintf("Dialing init server failed: %q. 
Retrying...", err)) return fmt.Errorf("dialing init server: %w", err) } defer conn.Close() @@ -200,7 +200,7 @@ func (d *initDoer) Do(ctx context.Context) error { res, err := resp.Recv() // get first response, either success or failure if err != nil { if e := d.getLogs(resp); e != nil { - d.log.Debug(fmt.Sprintf("Failed to collect logs: %s", e)) + d.log.Debug(fmt.Sprintf("Failed to collect logs: %q", e)) return &NonRetriableInitError{ LogCollectionErr: e, Err: err, @@ -214,7 +214,7 @@ func (d *initDoer) Do(ctx context.Context) error { d.resp = res.GetInitSuccess() case *initproto.InitResponse_InitFailure: if e := d.getLogs(resp); e != nil { - d.log.Debug(fmt.Sprintf("Failed to get logs from cluster: %s", e)) + d.log.Debug(fmt.Sprintf("Failed to get logs from cluster: %q", e)) return &NonRetriableInitError{ LogCollectionErr: e, Err: errors.New(res.GetInitFailure().GetError()), @@ -225,7 +225,7 @@ func (d *initDoer) Do(ctx context.Context) error { d.log.Debug("Cluster returned nil response type") err = errors.New("empty response from cluster") if e := d.getLogs(resp); e != nil { - d.log.Debug(fmt.Sprintf("Failed to collect logs: %s", e)) + d.log.Debug(fmt.Sprintf("Failed to collect logs: %q", e)) return &NonRetriableInitError{ LogCollectionErr: e, Err: err, @@ -236,7 +236,7 @@ func (d *initDoer) Do(ctx context.Context) error { d.log.Debug("Cluster returned unknown response type") err = errors.New("unknown response from cluster") if e := d.getLogs(resp); e != nil { - d.log.Debug(fmt.Sprintf("Failed to collect logs: %s", e)) + d.log.Debug(fmt.Sprintf("Failed to collect logs: %q", e)) return &NonRetriableInitError{ LogCollectionErr: e, Err: err, diff --git a/internal/constellation/helm/actionfactory.go b/internal/constellation/helm/actionfactory.go index f1a069399..67ca3ab34 100644 --- a/internal/constellation/helm/actionfactory.go +++ b/internal/constellation/helm/actionfactory.go @@ -90,15 +90,15 @@ func (a actionFactory) appendNewAction( ) } - a.log.Debug(fmt.Sprintf("release %s not found, adding to new releases...", release.releaseName)) + a.log.Debug(fmt.Sprintf("release %q not found, adding to new releases...", release.releaseName)) *actions = append(*actions, a.newInstall(release, timeout)) return nil } if err != nil { return fmt.Errorf("getting version for %s: %w", release.releaseName, err) } - a.log.Debug(fmt.Sprintf("Current %s version: %s", release.releaseName, currentVersion)) - a.log.Debug(fmt.Sprintf("New %s version: %s", release.releaseName, newVersion)) + a.log.Debug(fmt.Sprintf("Current %q version: %q", release.releaseName, currentVersion)) + a.log.Debug(fmt.Sprintf("New %q version: %q", release.releaseName, newVersion)) if !force { // For charts we package ourselves, the version is equal to the CLI version (charts are embedded in the binary). 
@@ -132,7 +132,7 @@ func (a actionFactory) appendNewAction( release.releaseName == certManagerInfo.releaseName { return ErrConfirmationMissing } - a.log.Debug(fmt.Sprintf("Upgrading %s from %s to %s", release.releaseName, currentVersion, newVersion)) + a.log.Debug(fmt.Sprintf("Upgrading %q from %q to %q", release.releaseName, currentVersion, newVersion)) *actions = append(*actions, a.newUpgrade(release, timeout)) return nil } @@ -165,7 +165,7 @@ func (a actionFactory) updateCRDs(ctx context.Context, chart *chart.Chart) error for _, dep := range chart.Dependencies() { for _, crdFile := range dep.Files { if strings.HasPrefix(crdFile.Name, "crds/") { - a.log.Debug(fmt.Sprintf("Updating crd: %s", crdFile.Name)) + a.log.Debug(fmt.Sprintf("Updating crd: %q", crdFile.Name)) err := a.kubeClient.ApplyCRD(ctx, crdFile.Data) if err != nil { return err diff --git a/internal/constellation/helm/retryaction.go b/internal/constellation/helm/retryaction.go index 89579356e..7117f0b45 100644 --- a/internal/constellation/helm/retryaction.go +++ b/internal/constellation/helm/retryaction.go @@ -49,7 +49,7 @@ func retryApply(ctx context.Context, action retrieableApplier, retryInterval tim return fmt.Errorf("helm install: %w", err) } retryLoopFinishDuration := time.Since(retryLoopStartTime) - log.Debug(fmt.Sprintf("Helm chart %q installation finished after %s", action.ReleaseName(), retryLoopFinishDuration)) + log.Debug(fmt.Sprintf("Helm chart %q installation finished after %q", action.ReleaseName(), retryLoopFinishDuration)) return nil } @@ -61,9 +61,9 @@ type applyDoer struct { // Do tries to apply the action. func (i applyDoer) Do(ctx context.Context) error { - i.log.Debug(fmt.Sprintf("Trying to apply Helm chart %s", i.applier.ReleaseName())) + i.log.Debug(fmt.Sprintf("Trying to apply Helm chart %q", i.applier.ReleaseName())) if err := i.applier.apply(ctx); err != nil { - i.log.Debug(fmt.Sprintf("Helm chart installation %s failed: %v", i.applier.ReleaseName(), err)) + i.log.Debug(fmt.Sprintf("Helm chart installation %q failed: %q", i.applier.ReleaseName(), err)) return err } diff --git a/internal/constellation/kubecmd/backup.go b/internal/constellation/kubecmd/backup.go index c7e32d5be..28763a146 100644 --- a/internal/constellation/kubecmd/backup.go +++ b/internal/constellation/kubecmd/backup.go @@ -39,7 +39,7 @@ func (k *KubeCmd) BackupCRDs(ctx context.Context, fileHandler file.Handler, upgr for i := range crds { path := filepath.Join(crdBackupFolder, crds[i].Name+".yaml") - k.log.Debug(fmt.Sprintf("Creating CRD backup: %s", path)) + k.log.Debug(fmt.Sprintf("Creating CRD backup: %q", path)) // We have to manually set kind/apiversion because of a long-standing limitation of the API: // https://github.com/kubernetes/kubernetes/issues/3030#issuecomment-67543738 @@ -64,7 +64,7 @@ func (k *KubeCmd) BackupCRDs(ctx context.Context, fileHandler file.Handler, upgr func (k *KubeCmd) BackupCRs(ctx context.Context, fileHandler file.Handler, crds []apiextensionsv1.CustomResourceDefinition, upgradeDir string) error { k.log.Debug("Starting CR backup") for _, crd := range crds { - k.log.Debug(fmt.Sprintf("Creating backup for resource type: %s", crd.Name)) + k.log.Debug(fmt.Sprintf("Creating backup for resource type: %q", crd.Name)) // Iterate over all versions of the CRD // TODO(daniel-weisse): Consider iterating over crd.Status.StoredVersions instead diff --git a/internal/constellation/kubecmd/kubecmd.go b/internal/constellation/kubecmd/kubecmd.go index dedb4539b..ad6b83c77 100644 --- 
a/internal/constellation/kubecmd/kubecmd.go +++ b/internal/constellation/kubecmd/kubecmd.go @@ -103,7 +103,7 @@ func (k *KubeCmd) UpgradeNodeImage(ctx context.Context, imageVersion semver.Semv return fmt.Errorf("updating image version: %w", err) } - k.log.Debug(fmt.Sprintf("Updating local copy of nodeVersion image version from %s to %s", nodeVersion.Spec.ImageVersion, imageVersion.String())) + k.log.Debug(fmt.Sprintf("Updating local copy of nodeVersion image version from %q to %q", nodeVersion.Spec.ImageVersion, imageVersion.String())) nodeVersion.Spec.ImageReference = imageReference nodeVersion.Spec.ImageVersion = imageVersion.String() @@ -266,7 +266,7 @@ func (k *KubeCmd) ExtendClusterConfigCertSANs(ctx context.Context, alternativeNa k.log.Debug("No new SANs to add to the cluster's apiserver SAN field") return nil } - k.log.Debug(fmt.Sprintf("Extending the cluster's apiserver SAN field with the following SANs: %s\n", strings.Join(missingSANs, ", "))) + k.log.Debug(fmt.Sprintf("Extending the cluster's apiserver SAN field with the following SANs: %q", strings.Join(missingSANs, ", "))) clusterConfiguration.APIServer.CertSANs = append(clusterConfiguration.APIServer.CertSANs, missingSANs...) sort.Strings(clusterConfiguration.APIServer.CertSANs) @@ -409,7 +409,7 @@ func (k *KubeCmd) prepareUpdateK8s(nodeVersion *updatev1alpha1.NodeVersion, newC } } - k.log.Debug(fmt.Sprintf("Updating local copy of nodeVersion Kubernetes version from %s to %s", nodeVersion.Spec.KubernetesClusterVersion, newClusterVersion)) + k.log.Debug(fmt.Sprintf("Updating local copy of nodeVersion Kubernetes version from %q to %q", nodeVersion.Spec.KubernetesClusterVersion, newClusterVersion)) nodeVersion.Spec.KubernetesComponentsReference = configMap.ObjectMeta.Name nodeVersion.Spec.KubernetesClusterVersion = newClusterVersion @@ -461,7 +461,7 @@ func retryGetJoinConfig(ctx context.Context, kubectl kubectlInterface, retryInte return false } retries++ - log.Debug(fmt.Sprintf("Getting join-config ConfigMap failed (attempt %d/%d): %s", retries, maxRetryAttempts, err)) + log.Debug(fmt.Sprintf("Getting join-config ConfigMap failed (attempt %d/%d): %q", retries, maxRetryAttempts, err)) return retries < maxRetryAttempts } @@ -483,7 +483,7 @@ func retryAction(ctx context.Context, retryInterval time.Duration, maxRetries in ctr := 0 retrier := conretry.NewIntervalRetrier(&kubeDoer{action: action}, retryInterval, func(err error) bool { ctr++ - log.Debug(fmt.Sprintf("Action failed (attempt %d/%d): %s", ctr, maxRetries, err)) + log.Debug(fmt.Sprintf("Action failed (attempt %d/%d): %q", ctr, maxRetries, err)) return ctr < maxRetries }) return retrier.Do(ctx) diff --git a/internal/grpc/grpclog/grpclog.go b/internal/grpc/grpclog/grpclog.go index e29c990b6..be4d27ff3 100644 --- a/internal/grpc/grpclog/grpclog.go +++ b/internal/grpc/grpclog/grpclog.go @@ -31,15 +31,15 @@ func LogStateChangesUntilReady(ctx context.Context, conn getStater, log debugLog go func() { defer wg.Done() state := conn.GetState() - log.Debug(fmt.Sprintf("Connection state started as %s", state)) + log.Debug(fmt.Sprintf("Connection state started as %q", state)) for ; state != connectivity.Ready && conn.WaitForStateChange(ctx, state); state = conn.GetState() { - log.Debug(fmt.Sprintf("Connection state changed to %s", state)) + log.Debug(fmt.Sprintf("Connection state changed to %q", state)) } if state == connectivity.Ready { log.Debug("Connection ready") isReadyCallback() } else { - log.Debug(fmt.Sprintf("Connection state ended with %s", state)) + 
log.Debug(fmt.Sprintf("Connection state ended with %q", state)) } }() } diff --git a/internal/grpc/grpclog/grpclog_test.go b/internal/grpc/grpclog/grpclog_test.go index 704f1a923..eb912521f 100644 --- a/internal/grpc/grpclog/grpclog_test.go +++ b/internal/grpc/grpclog/grpclog_test.go @@ -33,8 +33,8 @@ func TestLogStateChanges(t *testing.T) { }, assert: func(t *testing.T, lg *spyLog, isReadyCallbackCalled bool) { require.Len(t, lg.msgs, 3) - assert.Equal(t, "Connection state started as CONNECTING", lg.msgs[0]) - assert.Equal(t, "Connection state changed to CONNECTING", lg.msgs[1]) + assert.Equal(t, "Connection state started as \"CONNECTING\"", lg.msgs[0]) + assert.Equal(t, "Connection state changed to \"CONNECTING\"", lg.msgs[1]) assert.Equal(t, "Connection ready", lg.msgs[2]) assert.True(t, isReadyCallbackCalled) }, @@ -49,7 +49,7 @@ func TestLogStateChanges(t *testing.T) { }, assert: func(t *testing.T, lg *spyLog, isReadyCallbackCalled bool) { require.Len(t, lg.msgs, 2) - assert.Equal(t, "Connection state started as READY", lg.msgs[0]) + assert.Equal(t, "Connection state started as \"READY\"", lg.msgs[0]) assert.Equal(t, "Connection ready", lg.msgs[1]) assert.True(t, isReadyCallbackCalled) }, @@ -64,8 +64,8 @@ func TestLogStateChanges(t *testing.T) { }, assert: func(t *testing.T, lg *spyLog, isReadyCallbackCalled bool) { require.Len(t, lg.msgs, 2) - assert.Equal(t, "Connection state started as CONNECTING", lg.msgs[0]) - assert.Equal(t, "Connection state ended with CONNECTING", lg.msgs[1]) + assert.Equal(t, "Connection state started as \"CONNECTING\"", lg.msgs[0]) + assert.Equal(t, "Connection state ended with \"CONNECTING\"", lg.msgs[1]) assert.False(t, isReadyCallbackCalled) }, }, diff --git a/internal/osimage/archive/archive.go b/internal/osimage/archive/archive.go index f42b48e4c..f49cf0de8 100644 --- a/internal/osimage/archive/archive.go +++ b/internal/osimage/archive/archive.go @@ -74,7 +74,7 @@ func (a *Archivist) Archive(ctx context.Context, version versionsapi.Version, cs if err != nil { return "", err } - a.log.Debug(fmt.Sprintf("Archiving OS image %s %s %v to s3://%v/%v", csp, attestationVariant, version.ShortPath(), a.bucket, key)) + a.log.Debug(fmt.Sprintf("Archiving OS image %q to s3://%s/%s", fmt.Sprintf("%s %s %v", csp, attestationVariant, version.ShortPath()), a.bucket, key)) _, err = a.uploadClient.Upload(ctx, &s3.PutObjectInput{ Bucket: &a.bucket, Key: &key, diff --git a/internal/osimage/imageinfo/imageinfo.go b/internal/osimage/imageinfo/imageinfo.go index a26ab24a5..844690bd9 100644 --- a/internal/osimage/imageinfo/imageinfo.go +++ b/internal/osimage/imageinfo/imageinfo.go @@ -78,7 +78,7 @@ func (a *Uploader) Upload(ctx context.Context, imageInfo versionsapi.ImageInfo) if err != nil { return "", err } - a.log.Debug(fmt.Sprintf("Archiving image info to s3://%v/%v", a.bucket, key)) + a.log.Debug(fmt.Sprintf("Archiving image info to s3://%s/%s", a.bucket, key)) buf := &bytes.Buffer{} if err := json.NewEncoder(buf).Encode(imageInfo); err != nil { return "", err diff --git a/internal/osimage/measurementsuploader/measurementsuploader.go b/internal/osimage/measurementsuploader/measurementsuploader.go index 1e6c9ffa0..59c2eecfe 100644 --- a/internal/osimage/measurementsuploader/measurementsuploader.go +++ b/internal/osimage/measurementsuploader/measurementsuploader.go @@ -92,7 +92,7 @@ func (a *Uploader) Upload(ctx context.Context, rawMeasurement, signature io.Read if err != nil { return "", "", err } - a.log.Debug(fmt.Sprintf("Archiving image measurements to s3://%v/%v and 
s3://%v/%v", a.bucket, key, a.bucket, sigKey)) + a.log.Debug(fmt.Sprintf("Archiving image measurements to s3://%s/%s and s3://%s/%s", a.bucket, key, a.bucket, sigKey)) if _, err = a.uploadClient.Upload(ctx, &s3.PutObjectInput{ Bucket: &a.bucket, Key: &key, diff --git a/internal/osimage/nop/nop.go b/internal/osimage/nop/nop.go index 5618acf03..883a7bf3c 100644 --- a/internal/osimage/nop/nop.go +++ b/internal/osimage/nop/nop.go @@ -28,6 +28,6 @@ func New(log *slog.Logger) *Uploader { // Upload pretends to upload images to a csp. func (u *Uploader) Upload(_ context.Context, req *osimage.UploadRequest) ([]versionsapi.ImageInfoEntry, error) { - u.log.Debug(fmt.Sprintf("Skipping image upload of %s since this CSP does not require images to be uploaded in advance.", req.Version.ShortPath())) + u.log.Debug(fmt.Sprintf("Skipping image upload of %q since this CSP does not require images to be uploaded in advance.", req.Version.ShortPath())) return nil, nil } diff --git a/internal/staticupload/staticupload.go b/internal/staticupload/staticupload.go index fd09734ad..5b68e8ae0 100644 --- a/internal/staticupload/staticupload.go +++ b/internal/staticupload/staticupload.go @@ -134,7 +134,7 @@ func (c *Client) Flush(ctx context.Context) error { c.mux.Lock() defer c.mux.Unlock() - c.logger.Debug(fmt.Sprintf("Invalidating keys: %s", c.dirtyKeys)) + c.logger.Debug(fmt.Sprintf("Invalidating keys: %q", c.dirtyKeys)) if len(c.dirtyKeys) == 0 { return nil } @@ -219,7 +219,7 @@ func (c *Client) waitForInvalidations(ctx context.Context) error { } waiter := cloudfront.NewInvalidationCompletedWaiter(c.cdnClient) - c.logger.Debug(fmt.Sprintf("Waiting for invalidations %s in distribution %s", c.invalidationIDs, c.distributionID)) + c.logger.Debug(fmt.Sprintf("Waiting for invalidations %v in distribution %q", c.invalidationIDs, c.distributionID)) for _, invalidationID := range c.invalidationIDs { waitIn := &cloudfront.GetInvalidationInput{ DistributionId: &c.distributionID, diff --git a/internal/verify/verify.go b/internal/verify/verify.go index 60b2e726e..8bd0eb25d 100644 --- a/internal/verify/verify.go +++ b/internal/verify/verify.go @@ -216,7 +216,7 @@ type Certificate struct { func newCertificates(certTypeName string, cert []byte, log debugLog) (certs []Certificate, err error) { newlinesTrimmed := strings.TrimSpace(string(cert)) - log.Debug(fmt.Sprintf("Decoding PEM certificate: %s", certTypeName)) + log.Debug(fmt.Sprintf("Decoding PEM certificate: %q", certTypeName)) i := 1 var rest []byte var block *pem.Block diff --git a/joinservice/internal/certcache/amdkds/amdkds_test.go b/joinservice/internal/certcache/amdkds/amdkds_test.go index 1ce3706a9..d7ede82f9 100644 --- a/joinservice/internal/certcache/amdkds/amdkds_test.go +++ b/joinservice/internal/certcache/amdkds/amdkds_test.go @@ -71,6 +71,6 @@ type stubGetter struct { } func (s *stubGetter) Get(url string) ([]byte, error) { - s.log.Debug(fmt.Sprintf("Request to %s", url)) + s.log.Debug(fmt.Sprintf("Request to %q", url)) return s.ret, s.err } diff --git a/joinservice/internal/certcache/certcache.go b/joinservice/internal/certcache/certcache.go index cada6fd7c..ba88b67b4 100644 --- a/joinservice/internal/certcache/certcache.go +++ b/joinservice/internal/certcache/certcache.go @@ -53,11 +53,11 @@ func (c *Client) CreateCertChainCache(ctx context.Context) (*CachedCerts, error) case variant.AWSSEVSNP{}: reportSigner = abi.VlekReportSigner default: - c.log.Debug(fmt.Sprintf("No certificate chain caching possible for attestation variant %s", c.attVariant)) + 
c.log.Debug(fmt.Sprintf("No certificate chain caching possible for attestation variant %q", c.attVariant)) return nil, nil } - c.log.Debug(fmt.Sprintf("Creating %s certificate chain cache", c.attVariant)) + c.log.Debug(fmt.Sprintf("Creating %q certificate chain cache", c.attVariant)) ask, ark, err := c.createCertChainCache(ctx, reportSigner) if err != nil { return nil, fmt.Errorf("creating %s certificate chain cache: %w", c.attVariant, err) diff --git a/joinservice/internal/watcher/validator.go b/joinservice/internal/watcher/validator.go index 6bf43635e..2d6a3bd3b 100644 --- a/joinservice/internal/watcher/validator.go +++ b/joinservice/internal/watcher/validator.go @@ -79,7 +79,7 @@ func (u *Updatable) Update() error { if err != nil { return fmt.Errorf("unmarshaling config: %w", err) } - u.log.Debug(fmt.Sprintf("New expected measurements: %+v", cfg.GetMeasurements())) + u.log.Debug(fmt.Sprintf("New expected measurements: %s", cfg.GetMeasurements().String())) cfgWithCerts, err := u.configWithCerts(cfg) if err != nil {