fixed final issues

fixed format strings for slog
This commit is contained in:
miampf 2024-01-15 14:22:01 +01:00
parent 69556e84d9
commit 941889ade7
No known key found for this signature in database
GPG key ID: 376EAC0E5307A669
58 changed files with 191 additions and 2210 deletions

View file

@ -13,6 +13,7 @@ import (
"log/slog"
"os"
"strconv"
"fmt"
"github.com/spf13/afero"

View file

@ -442,7 +442,7 @@ func (a *applyCmd) apply(
func (a *applyCmd) validateInputs(cmd *cobra.Command, configFetcher attestationconfigapi.Fetcher) (*config.Config, *state.State, error) {
// Read user's config and state file
a.log.Debug("Reading config from %s", a.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename))
a.log.Debug(fmt.Sprintf("Reading config from %s", a.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename)))
conf, err := config.New(a.fileHandler, constants.ConfigFilename, configFetcher, a.flags.force)
var configValidationErr *config.ValidationError
if errors.As(err, &configValidationErr) {
@ -452,7 +452,7 @@ func (a *applyCmd) validateInputs(cmd *cobra.Command, configFetcher attestationc
return nil, nil, err
}
a.log.Debug("Reading state file from %s", a.flags.pathPrefixer.PrefixPrintablePath(constants.StateFilename))
a.log.Debug(fmt.Sprintf("Reading state file from %s", a.flags.pathPrefixer.PrefixPrintablePath(constants.StateFilename)))
stateFile, err := state.CreateOrRead(a.fileHandler, constants.StateFilename)
if err != nil {
return nil, nil, err
@ -521,10 +521,10 @@ func (a *applyCmd) validateInputs(cmd *cobra.Command, configFetcher attestationc
// If we need to run the init RPC, the version has to be valid
// Otherwise, we are able to use an outdated version, meaning we skip the K8s upgrade
// We skip version validation if the user explicitly skips the Kubernetes phase
a.log.Debug("Validating Kubernetes version %s", conf.KubernetesVersion)
a.log.Debug(fmt.Sprintf("Validating Kubernetes version %s", conf.KubernetesVersion))
validVersion, err := versions.NewValidK8sVersion(string(conf.KubernetesVersion), true)
if err != nil {
a.log.Debug("Kubernetes version not valid: %s", err)
a.log.Debug(fmt.Sprintf("Kubernetes version not valid: %s", err))
if !a.flags.skipPhases.contains(skipInitPhase) {
return nil, nil, err
}
@ -563,7 +563,7 @@ func (a *applyCmd) validateInputs(cmd *cobra.Command, configFetcher attestationc
cmd.PrintErrf("Warning: Constellation with Kubernetes %s is still in preview. Use only for evaluation purposes.\n", validVersion)
}
conf.KubernetesVersion = validVersion
a.log.Debug("Target Kubernetes version set to %s", conf.KubernetesVersion)
a.log.Debug(fmt.Sprintf("Target Kubernetes version set to %s", conf.KubernetesVersion))
// Validate microservice version (helm versions) in the user's config matches the version of the CLI
// This makes sure we catch potential errors early, not just after we already ran Terraform migrations or the init RPC
@ -591,7 +591,7 @@ func (a *applyCmd) applyJoinConfig(cmd *cobra.Command, newConfig config.Attestat
) error {
clusterAttestationConfig, err := a.applier.GetClusterAttestationConfig(cmd.Context(), newConfig.GetVariant())
if err != nil {
a.log.Debug("Getting cluster attestation config failed: %s", err)
a.log.Debug(fmt.Sprintf("Getting cluster attestation config failed: %s", err))
if k8serrors.IsNotFound(err) {
a.log.Debug("Creating new join config")
return a.applier.ApplyJoinConfig(cmd.Context(), newConfig, measurementSalt)

View file

@ -108,7 +108,7 @@ func (a *applyCmd) backupHelmCharts(
if err := executor.SaveCharts(chartDir, a.fileHandler); err != nil {
return fmt.Errorf("saving Helm charts to disk: %w", err)
}
a.log.Debug("Helm charts saved to %s", a.flags.pathPrefixer.PrefixPrintablePath(chartDir))
a.log.Debug(fmt.Sprintf("Helm charts saved to %s", a.flags.pathPrefixer.PrefixPrintablePath(chartDir)))
if includesUpgrades {
a.log.Debug("Creating backup of CRDs and CRs")

View file

@ -29,7 +29,7 @@ import (
// On success, it writes the Kubernetes admin config file to disk.
// Therefore it is skipped if the Kubernetes admin config file already exists.
func (a *applyCmd) runInit(cmd *cobra.Command, conf *config.Config, stateFile *state.State) (*bytes.Buffer, error) {
a.log.Debug("Creating aTLS Validator for %s", conf.GetAttestationConfig().GetVariant())
a.log.Debug(fmt.Sprintf("Creating aTLS Validator for %s", conf.GetAttestationConfig().GetVariant()))
validator, err := choose.Validator(conf.GetAttestationConfig(), a.wLog)
if err != nil {
return nil, fmt.Errorf("creating validator: %w", err)
@ -121,7 +121,7 @@ func (a *applyCmd) writeInitOutput(
if err := a.fileHandler.Write(constants.AdminConfFilename, initResp.Kubeconfig, file.OptNone); err != nil {
return fmt.Errorf("writing kubeconfig: %w", err)
}
a.log.Debug("Kubeconfig written to %s", a.flags.pathPrefixer.PrefixPrintablePath(constants.AdminConfFilename))
a.log.Debug(fmt.Sprintf("Kubeconfig written to %s", a.flags.pathPrefixer.PrefixPrintablePath(constants.AdminConfFilename)))
if mergeConfig {
if err := a.merger.mergeConfigs(constants.AdminConfFilename, a.fileHandler); err != nil {
@ -136,7 +136,7 @@ func (a *applyCmd) writeInitOutput(
return fmt.Errorf("writing Constellation state file: %w", err)
}
a.log.Debug("Constellation state file written to %s", a.flags.pathPrefixer.PrefixPrintablePath(constants.StateFilename))
a.log.Debug(fmt.Sprintf("Constellation state file written to %s", a.flags.pathPrefixer.PrefixPrintablePath(constants.StateFilename)))
if !mergeConfig {
fmt.Fprintln(wr, "You can now connect to your cluster by executing:")

View file

@ -76,7 +76,7 @@ func (a *applyCmd) planTerraformChanges(cmd *cobra.Command, conf *config.Config,
//
// var manualMigrations []terraform.StateMigration
// for _, migration := range manualMigrations {
// u.log.Debug("Adding manual Terraform migration: %s", migration.DisplayName)
// u.log.Debug(fmt.Sprintf("Adding manual Terraform migration: %s", migration.DisplayName))
// u.infraApplier.AddManualStateMigration(migration)
// }
@ -186,7 +186,7 @@ func printCreateInfo(out io.Writer, conf *config.Config, log debugLog) error {
}
}
if len(otherGroupNames) > 0 {
log.Debug("Creating %d additional node groups: %v", len(otherGroupNames), otherGroupNames)
log.Debug(fmt.Sprintf("Creating %d additional node groups: %v", len(otherGroupNames), otherGroupNames))
}
fmt.Fprintf(out, "The following Constellation cluster will be created:\n")

View file

@ -104,7 +104,7 @@ func runConfigFetchMeasurements(cmd *cobra.Command, _ []string) error {
if err := cfm.flags.parse(cmd.Flags()); err != nil {
return fmt.Errorf("parsing flags: %w", err)
}
cfm.log.Debug("Using flags %+v", cfm.flags)
cfm.log.Debug(fmt.Sprintf("Using flags %+v", cfm.flags))
fetcher := attestationconfigapi.NewFetcherWithClient(http.DefaultClient, constants.CDNRepositoryURL)
return cfm.configFetchMeasurements(cmd, fileHandler, fetcher)
@ -118,7 +118,7 @@ func (cfm *configFetchMeasurementsCmd) configFetchMeasurements(
return errors.New("fetching measurements is not supported")
}
cfm.log.Debug("Loading configuration file from %q", cfm.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename))
cfm.log.Debug(fmt.Sprintf("Loading configuration file from %q", cfm.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename)))
conf, err := config.New(fileHandler, constants.ConfigFilename, fetcher, cfm.flags.force)
var configValidationErr *config.ValidationError
@ -152,14 +152,14 @@ func (cfm *configFetchMeasurementsCmd) configFetchMeasurements(
return fmt.Errorf("fetching and verifying measurements: %w", err)
}
}
cfm.log.Debug("Measurements: %#v\n", fetchedMeasurements)
cfm.log.Debug(fmt.Sprintf("Measurements: %#v\n", fetchedMeasurements))
cfm.log.Debug("Updating measurements in configuration")
conf.UpdateMeasurements(fetchedMeasurements)
if err := fileHandler.WriteYAML(constants.ConfigFilename, conf, file.OptOverwrite); err != nil {
return err
}
cfm.log.Debug("Configuration written to %s", cfm.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename))
cfm.log.Debug(fmt.Sprintf("Configuration written to %s", cfm.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename)))
cmd.Print("Successfully fetched measurements and updated Configuration\n")
return nil
}

View file

@ -85,13 +85,13 @@ func runConfigGenerate(cmd *cobra.Command, args []string) error {
if err := cg.flags.parse(cmd.Flags()); err != nil {
return fmt.Errorf("parsing flags: %w", err)
}
log.Debug("Parsed flags as %+v", cg.flags)
log.Debug(fmt.Sprintf("Parsed flags as %+v", cg.flags))
return cg.configGenerate(cmd, fileHandler, provider, args[0])
}
func (cg *configGenerateCmd) configGenerate(cmd *cobra.Command, fileHandler file.Handler, provider cloudprovider.Provider, rawProvider string) error {
cg.log.Debug("Using cloud provider %s", provider.String())
cg.log.Debug(fmt.Sprintf("Using cloud provider %s", provider.String()))
// Config creation
conf, err := createConfigWithAttestationVariant(provider, rawProvider, cg.flags.attestationVariant)

View file

@ -133,7 +133,7 @@ func (c *iamCreator) create(ctx context.Context) error {
var conf config.Config
if c.flags.updateConfig {
c.log.Debug("Parsing config %s", c.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename))
c.log.Debug(fmt.Sprintf("Parsing config %s", c.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename)))
if err := c.fileHandler.ReadYAML(constants.ConfigFilename, &conf); err != nil {
return fmt.Errorf("error reading the configuration file: %w", err)
}
@ -161,7 +161,7 @@ func (c *iamCreator) create(ctx context.Context) error {
}
if c.flags.updateConfig {
c.log.Debug("Writing IAM configuration to %s", c.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename))
c.log.Debug(fmt.Sprintf("Writing IAM configuration to %s", c.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename)))
c.providerCreator.writeOutputValuesToConfig(&conf, iamFile)
if err := c.fileHandler.WriteYAML(constants.ConfigFilename, conf, file.OptOverwrite); err != nil {
return err

View file

@ -77,25 +77,25 @@ type destroyCmd struct {
func (c *destroyCmd) iamDestroy(cmd *cobra.Command, spinner spinnerInterf, destroyer iamDestroyer, fsHandler file.Handler) error {
// check if there is a possibility that the cluster is still running by looking out for specific files
c.log.Debug("Checking if %q exists", c.flags.pathPrefixer.PrefixPrintablePath(constants.AdminConfFilename))
c.log.Debug(fmt.Sprintf("Checking if %q exists", c.flags.pathPrefixer.PrefixPrintablePath(constants.AdminConfFilename)))
if _, err := fsHandler.Stat(constants.AdminConfFilename); !errors.Is(err, os.ErrNotExist) {
return fmt.Errorf("file %q still exists, please make sure to terminate your cluster before destroying your IAM configuration", c.flags.pathPrefixer.PrefixPrintablePath(constants.AdminConfFilename))
}
c.log.Debug("Checking if %q exists", c.flags.pathPrefixer.PrefixPrintablePath(constants.StateFilename))
c.log.Debug(fmt.Sprintf("Checking if %q exists", c.flags.pathPrefixer.PrefixPrintablePath(constants.StateFilename)))
if _, err := fsHandler.Stat(constants.StateFilename); !errors.Is(err, os.ErrNotExist) {
return fmt.Errorf("file %q still exists, please make sure to terminate your cluster before destroying your IAM configuration", c.flags.pathPrefixer.PrefixPrintablePath(constants.StateFilename))
}
gcpFileExists := false
c.log.Debug("Checking if %q exists", c.flags.pathPrefixer.PrefixPrintablePath(constants.GCPServiceAccountKeyFilename))
c.log.Debug(fmt.Sprintf("Checking if %q exists", c.flags.pathPrefixer.PrefixPrintablePath(constants.GCPServiceAccountKeyFilename)))
if _, err := fsHandler.Stat(constants.GCPServiceAccountKeyFilename); err != nil {
if !errors.Is(err, os.ErrNotExist) {
return err
}
} else {
c.log.Debug("%q exists", c.flags.pathPrefixer.PrefixPrintablePath(constants.GCPServiceAccountKeyFilename))
c.log.Debug(fmt.Sprintf("%q exists", c.flags.pathPrefixer.PrefixPrintablePath(constants.GCPServiceAccountKeyFilename)))
gcpFileExists = true
}
@ -116,7 +116,7 @@ func (c *destroyCmd) iamDestroy(cmd *cobra.Command, spinner spinnerInterf, destr
}
if gcpFileExists {
c.log.Debug("Starting to delete %q", c.flags.pathPrefixer.PrefixPrintablePath(constants.GCPServiceAccountKeyFilename))
c.log.Debug(fmt.Sprintf("Starting to delete %q", c.flags.pathPrefixer.PrefixPrintablePath(constants.GCPServiceAccountKeyFilename)))
proceed, err := c.deleteGCPServiceAccountKeyFile(cmd, destroyer, fsHandler)
if err != nil {
return err
@ -143,7 +143,7 @@ func (c *destroyCmd) iamDestroy(cmd *cobra.Command, spinner spinnerInterf, destr
func (c *destroyCmd) deleteGCPServiceAccountKeyFile(cmd *cobra.Command, destroyer iamDestroyer, fsHandler file.Handler) (bool, error) {
var fileSaKey gcpshared.ServiceAccountKey
c.log.Debug("Parsing %q", c.flags.pathPrefixer.PrefixPrintablePath(constants.GCPServiceAccountKeyFilename))
c.log.Debug(fmt.Sprintf("Parsing %q", c.flags.pathPrefixer.PrefixPrintablePath(constants.GCPServiceAccountKeyFilename)))
if err := fsHandler.ReadJSON(constants.GCPServiceAccountKeyFilename, &fileSaKey); err != nil {
return false, err
}
@ -168,6 +168,6 @@ func (c *destroyCmd) deleteGCPServiceAccountKeyFile(cmd *cobra.Command, destroye
return false, err
}
c.log.Debug("Successfully deleted %q", c.flags.pathPrefixer.PrefixPrintablePath(constants.GCPServiceAccountKeyFilename))
c.log.Debug(fmt.Sprintf("Successfully deleted %q", c.flags.pathPrefixer.PrefixPrintablePath(constants.GCPServiceAccountKeyFilename)))
return true, nil
}

View file

@ -72,7 +72,7 @@ func (c *kubeconfigMerger) mergeConfigs(configPath string, fileHandler file.Hand
clientcmd.RecommendedHomeFile,
configPath, // our config should overwrite the default config
}
c.log.Debug("Kubeconfig file loading precedence: %v", loadingRules.Precedence)
c.log.Debug(fmt.Sprintf("Kubeconfig file loading precedence: %v", loadingRules.Precedence))
// merge the kubeconfigs
cfg, err := loadingRules.Load()
@ -82,7 +82,7 @@ func (c *kubeconfigMerger) mergeConfigs(configPath string, fileHandler file.Hand
// Set the current context to the cluster we just created
cfg.CurrentContext = constellConfig.CurrentContext
c.log.Debug("Set current context to %s", cfg.CurrentContext)
c.log.Debug(fmt.Sprintf("Set current context to %s", cfg.CurrentContext))
json, err := runtime.Encode(clientcodec.Codec, cfg)
if err != nil {
@ -97,7 +97,7 @@ func (c *kubeconfigMerger) mergeConfigs(configPath string, fileHandler file.Hand
if err := fileHandler.Write(clientcmd.RecommendedHomeFile, mergedKubeconfig, file.OptOverwrite); err != nil {
return fmt.Errorf("writing merged kubeconfig to file: %w", err)
}
c.log.Debug("Merged kubeconfig into default config file: %s", clientcmd.RecommendedHomeFile)
c.log.Debug(fmt.Sprintf("Merged kubeconfig into default config file: %s", clientcmd.RecommendedHomeFile))
return nil
}

View file

@ -24,7 +24,7 @@ import (
// is used. It is a no-op in the open source version of Constellation.
func (a *applyCmd) checkLicenseFile(cmd *cobra.Command, csp cloudprovider.Provider) {
var licenseID string
a.log.Debugf("Running license check")
a.log.Debug("Running license check")
readBytes, err := a.fileHandler.Read(constants.LicenseFilename)
if errors.Is(err, fs.ErrNotExist) {
@ -52,5 +52,5 @@ func (a *applyCmd) checkLicenseFile(cmd *cobra.Command, csp cloudprovider.Provid
cmd.Printf("Please keep your vCPU quota (%d) in mind.\n", quota)
}
a.log.Debugf("Checked license")
a.log.Debug("Checked license")
}

View file

@ -56,7 +56,7 @@ func runPatchMAA(cmd *cobra.Command, args []string) error {
}
func (c *maaPatchCmd) patchMAA(cmd *cobra.Command, attestationURL string) error {
c.log.Debug("Using attestation URL %s", attestationURL)
c.log.Debug(fmt.Sprintf("Using attestation URL %s", attestationURL))
if err := c.patcher.Patch(cmd.Context(), attestationURL); err != nil {
return fmt.Errorf("patching MAA attestation policy: %w", err)

View file

@ -45,7 +45,7 @@ func (m *miniUpCmd) checkSystemRequirements(out io.Writer) error {
if runtime.NumCPU() < 6 {
fmt.Fprintf(out, "WARNING: Only %d CPU cores available. This may cause performance issues.\n", runtime.NumCPU())
}
m.log.Debug("Checked CPU cores - there are %d", runtime.NumCPU())
m.log.Debug(fmt.Sprintf("Checked CPU cores - there are %d", runtime.NumCPU()))
// check memory
f, err := os.Open("/proc/meminfo")
@ -71,7 +71,7 @@ func (m *miniUpCmd) checkSystemRequirements(out io.Writer) error {
if memGB < 6 {
fmt.Fprintln(out, "WARNING: Less than 6GB of memory available. This may cause performance issues.")
}
m.log.Debug("Checked available memory, you have %dGB available", memGB)
m.log.Debug(fmt.Sprintf("Checked available memory, you have %dGB available", memGB))
var stat unix.Statfs_t
if err := unix.Statfs(".", &stat); err != nil {
@ -81,7 +81,7 @@ func (m *miniUpCmd) checkSystemRequirements(out io.Writer) error {
if freeSpaceGB < 20 {
return fmt.Errorf("insufficient disk space: %dGB, at least 20GB of disk space are required by MiniConstellation", freeSpaceGB)
}
m.log.Debug("Checked for free space available, you have %dGB available", freeSpaceGB)
m.log.Debug(fmt.Sprintf("Checked for free space available, you have %dGB available", freeSpaceGB))
return nil
}

View file

@ -84,7 +84,7 @@ func runRecover(cmd *cobra.Command, _ []string) error {
if err := r.flags.parse(cmd.Flags()); err != nil {
return err
}
r.log.Debug("Using flags: %+v", r.flags)
r.log.Debug(fmt.Sprintf("Using flags: %+v", r.flags))
return r.recover(cmd, fileHandler, 5*time.Second, &recoverDoer{log: r.log}, newDialer)
}
@ -93,12 +93,12 @@ func (r *recoverCmd) recover(
doer recoverDoerInterface, newDialer func(validator atls.Validator) *dialer.Dialer,
) error {
var masterSecret uri.MasterSecret
r.log.Debug("Loading master secret file from %s", r.flags.pathPrefixer.PrefixPrintablePath(constants.MasterSecretFilename))
r.log.Debug(fmt.Sprintf("Loading master secret file from %s", r.flags.pathPrefixer.PrefixPrintablePath(constants.MasterSecretFilename)))
if err := fileHandler.ReadJSON(constants.MasterSecretFilename, &masterSecret); err != nil {
return err
}
r.log.Debug("Loading configuration file from %q", r.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename))
r.log.Debug(fmt.Sprintf("Loading configuration file from %q", r.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename)))
conf, err := config.New(fileHandler, constants.ConfigFilename, r.configFetcher, r.flags.force)
var configValidationErr *config.ValidationError
if errors.As(err, &configValidationErr) {
@ -129,14 +129,14 @@ func (r *recoverCmd) recover(
conf.UpdateMAAURL(stateFile.Infrastructure.Azure.AttestationURL)
}
r.log.Debug("Creating aTLS Validator for %s", conf.GetAttestationConfig().GetVariant())
r.log.Debug(fmt.Sprintf("Creating aTLS Validator for %s", conf.GetAttestationConfig().GetVariant()))
validator, err := choose.Validator(conf.GetAttestationConfig(), warnLogger{cmd: cmd, log: r.log})
if err != nil {
return fmt.Errorf("creating new validator: %w", err)
}
r.log.Debug("Created a new validator")
doer.setDialer(newDialer(validator), endpoint)
r.log.Debug("Set dialer for endpoint %s", endpoint)
r.log.Debug(fmt.Sprintf("Set dialer for endpoint %s", endpoint))
doer.setURIs(masterSecret.EncodeToURI(), uri.NoStoreURI)
r.log.Debug("Set secrets")
if err := r.recoverCall(cmd.Context(), cmd.OutOrStdout(), interval, doer); err != nil {
@ -166,7 +166,7 @@ func (r *recoverCmd) recoverCall(ctx context.Context, out io.Writer, interval ti
})
}
r.log.Debug("Encountered error (retriable: %t): %s", retry, err)
r.log.Debug(fmt.Sprintf("Encountered error (retriable: %t): %s", retry, err))
return retry
}
@ -179,7 +179,7 @@ func (r *recoverCmd) recoverCall(ctx context.Context, out io.Writer, interval ti
fmt.Fprintln(out, "Pushed recovery key.")
ctr++
}
r.log.Debug("Retry counter is %d", ctr)
r.log.Debug(fmt.Sprintf("Retry counter is %d", ctr))
if ctr > 0 {
fmt.Fprintf(out, "Recovered %d control-plane nodes.\n", ctr)
} else if grpcRetry.ServiceIsUnavailable(err) {

View file

@ -187,7 +187,7 @@ func (u *upgradeCheckCmd) upgradeCheck(cmd *cobra.Command, fetcher attestationco
// get current image version of the cluster
csp := conf.GetProvider()
attestationVariant := conf.GetAttestationConfig().GetVariant()
u.log.Debug("Using provider %s with attestation variant %s", csp.String(), attestationVariant.String())
u.log.Debug(fmt.Sprintf("Using provider %s with attestation variant %s", csp.String(), attestationVariant.String()))
current, err := u.collect.currentVersions(cmd.Context())
if err != nil {
@ -198,18 +198,18 @@ func (u *upgradeCheckCmd) upgradeCheck(cmd *cobra.Command, fetcher attestationco
if err != nil {
return err
}
u.log.Debug("Current cli version: %s", current.cli)
u.log.Debug("Supported cli version(s): %s", supported.cli)
u.log.Debug("Current service version: %s", current.service)
u.log.Debug("Supported service version: %s", supported.service)
u.log.Debug("Current k8s version: %s", current.k8s)
u.log.Debug("Supported k8s version(s): %s", supported.k8s)
u.log.Debug(fmt.Sprintf("Current cli version: %s", current.cli))
u.log.Debug(fmt.Sprintf("Supported cli version(s): %s", supported.cli))
u.log.Debug(fmt.Sprintf("Current service version: %s", current.service))
u.log.Debug(fmt.Sprintf("Supported service version: %s", supported.service))
u.log.Debug(fmt.Sprintf("Current k8s version: %s", current.k8s))
u.log.Debug(fmt.Sprintf("Supported k8s version(s): %s", supported.k8s))
// Filter versions to only include upgrades
newServices := supported.service
if err := supported.service.IsUpgradeTo(current.service); err != nil {
newServices = consemver.Semver{}
u.log.Debug("No valid service upgrades are available from %q to %q. The minor version can only drift by 1.\n", current.service.String(), supported.service.String())
u.log.Debug(fmt.Sprintf("No valid service upgrades are available from %q to %q. The minor version can only drift by 1.\n", current.service.String(), supported.service.String()))
}
newKubernetes := filterK8sUpgrades(current.k8s, supported.k8s)
@ -343,7 +343,7 @@ func (v *versionCollector) newMeasurements(ctx context.Context, csp cloudprovide
// get expected measurements for each image
upgrades := make(map[string]measurements.M)
for _, version := range versions {
v.log.Debug("Fetching measurements for image: %s", version)
v.log.Debug(fmt.Sprintf("Fetching measurements for image: %s", version.Version()))
shortPath := version.ShortPath()
publicKey, err := keyselect.CosignPublicKeyForVersion(version)
@ -364,7 +364,7 @@ func (v *versionCollector) newMeasurements(ctx context.Context, csp cloudprovide
}
upgrades[shortPath] = measurements
}
v.log.Debug("Compatible image measurements are %v", upgrades)
v.log.Debug(fmt.Sprintf("Compatible image measurements are %v", upgrades))
return upgrades, nil
}
@ -452,9 +452,9 @@ func (v *versionCollector) newImages(ctx context.Context, currentImageVersion co
if err != nil {
return nil, fmt.Errorf("calculating next image minor version: %w", err)
}
v.log.Debug("Current image minor version is %s", currentImageMinorVer)
v.log.Debug("Current CLI minor version is %s", currentCLIMinorVer)
v.log.Debug("Next image minor version is %s", nextImageMinorVer)
v.log.Debug(fmt.Sprintf("Current image minor version is %s", currentImageMinorVer))
v.log.Debug(fmt.Sprintf("Current CLI minor version is %s", currentCLIMinorVer))
v.log.Debug(fmt.Sprintf("Next image minor version is %s", nextImageMinorVer))
allowedMinorVersions := []string{currentImageMinorVer, nextImageMinorVer}
switch cliImageCompare := semver.Compare(currentCLIMinorVer, currentImageMinorVer); {
@ -470,7 +470,7 @@ func (v *versionCollector) newImages(ctx context.Context, currentImageVersion co
case cliImageCompare > 0:
allowedMinorVersions = []string{currentImageMinorVer, nextImageMinorVer}
}
v.log.Debug("Allowed minor versions are %#v", allowedMinorVersions)
v.log.Debug(fmt.Sprintf("Allowed minor versions are %#v", allowedMinorVersions))
newerImages, err := v.newerVersions(ctx, allowedMinorVersions)
if err != nil {
@ -493,7 +493,7 @@ func (v *versionCollector) newerVersions(ctx context.Context, allowedVersions []
patchList, err := v.verListFetcher.FetchVersionList(ctx, patchList)
var notFound *fetcher.NotFoundError
if errors.As(err, &notFound) {
v.log.Debug("Skipping version: %s", err)
v.log.Debug(fmt.Sprintf("Skipping version: %s", err))
continue
}
if err != nil {
@ -501,7 +501,7 @@ func (v *versionCollector) newerVersions(ctx context.Context, allowedVersions []
}
updateCandidates = append(updateCandidates, patchList.StructuredVersions()...)
}
v.log.Debug("Update candidates are %v", updateCandidates)
v.log.Debug(fmt.Sprintf("Update candidates are %v", updateCandidates))
return updateCandidates, nil
}
@ -603,7 +603,7 @@ func getCompatibleImageMeasurements(ctx context.Context, writer io.Writer, clien
}
var fetchedMeasurements measurements.M
log.Debug("Fetching for measurement url: %s", measurementsURL)
log.Debug(fmt.Sprintf("Fetching for measurement url: %s", measurementsURL))
hash, err := fetchedMeasurements.FetchAndVerify(
ctx, client, cosign,
@ -657,7 +657,7 @@ func (v *versionCollector) newCLIVersions(ctx context.Context) ([]consemver.Semv
return nil, fmt.Errorf("parsing version %s: %w", version, err)
}
if err := target.IsUpgradeTo(v.cliVersion); err != nil {
v.log.Debug("Skipping incompatible minor version %q: %s", version, err)
v.log.Debug(fmt.Sprintf("Skipping incompatible minor version %q: %s", version, err))
continue
}
list := versionsapi.List{
@ -691,7 +691,7 @@ func (v *versionCollector) filterCompatibleCLIVersions(ctx context.Context, cliP
var compatibleVersions []consemver.Semver
for _, version := range cliPatchVersions {
if err := version.IsUpgradeTo(v.cliVersion); err != nil {
v.log.Debug("Skipping incompatible patch version %q: %s", version, err)
v.log.Debug(fmt.Sprintf("Skipping incompatible patch version %q: %s", version, err))
continue
}
req := versionsapi.CLIInfo{

View file

@ -128,7 +128,7 @@ func runVerify(cmd *cobra.Command, _ []string) error {
if err := v.flags.parse(cmd.Flags()); err != nil {
return err
}
v.log.Debug("Using flags: %+v", v.flags)
v.log.Debug(fmt.Sprintf("Using flags: %+v", v.flags))
fetcher := attestationconfigapi.NewFetcher()
return v.verify(cmd, verifyClient, formatterFactory, fetcher)
}
@ -136,7 +136,7 @@ func runVerify(cmd *cobra.Command, _ []string) error {
type formatterFactory func(output string, attestation variant.Variant, log debugLog) (attestationDocFormatter, error)
func (c *verifyCmd) verify(cmd *cobra.Command, verifyClient verifyClient, factory formatterFactory, configFetcher attestationconfigapi.Fetcher) error {
c.log.Debug("Loading configuration file from %q", c.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename))
c.log.Debug(fmt.Sprintf("Loading configuration file from %q", c.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename)))
conf, err := config.New(c.fileHandler, constants.ConfigFilename, configFetcher, c.flags.force)
var configValidationErr *config.ValidationError
if errors.As(err, &configValidationErr) {
@ -175,7 +175,7 @@ func (c *verifyCmd) verify(cmd *cobra.Command, verifyClient verifyClient, factor
return fmt.Errorf("updating expected PCRs: %w", err)
}
c.log.Debug("Creating aTLS Validator for %s", conf.GetAttestationConfig().GetVariant())
c.log.Debug(fmt.Sprintf("Creating aTLS Validator for %s", conf.GetAttestationConfig().GetVariant()))
validator, err := choose.Validator(attConfig, warnLogger{cmd: cmd, log: c.log})
if err != nil {
return fmt.Errorf("creating aTLS validator: %w", err)
@ -185,7 +185,7 @@ func (c *verifyCmd) verify(cmd *cobra.Command, verifyClient verifyClient, factor
if err != nil {
return fmt.Errorf("generating random nonce: %w", err)
}
c.log.Debug("Generated random nonce: %x", nonce)
c.log.Debug(fmt.Sprintf("Generated random nonce: %x", nonce))
rawAttestationDoc, err := verifyClient.Verify(
cmd.Context(),
@ -384,7 +384,7 @@ type constellationVerifier struct {
func (v *constellationVerifier) Verify(
ctx context.Context, endpoint string, req *verifyproto.GetAttestationRequest, validator atls.Validator,
) (string, error) {
v.log.Debug("Dialing endpoint: %q", endpoint)
v.log.Debug(fmt.Sprintf("Dialing endpoint: %q", endpoint))
conn, err := v.dialer.DialInsecure(ctx, endpoint)
if err != nil {
return "", fmt.Errorf("dialing init server: %w", err)

View file

@ -186,10 +186,10 @@ func (s *ServiceManager) OverrideServiceUnitExecStart(ctx context.Context, unitN
// do not return early here
// the "daemon-reload" command may return an unrelated error
// and there is no way to know if the override was successful
log.Warn("Failed to perform systemd daemon-reload: %v", err)
log.Warn(fmt.Sprintf("Failed to perform systemd daemon-reload: %v", err))
}
if err := s.SystemdAction(ctx, ServiceManagerRequest{Unit: unitName + ".service", Action: Restart}); err != nil {
log.Warn("Failed to perform unit restart: %v", err)
log.Warn(fmt.Sprintf("Failed to perform unit restart: %v", err))
return fmt.Errorf("performing systemd unit restart: %w", err)
}

View file

@ -53,7 +53,7 @@ func NewStartTrigger(ctx context.Context, wg *sync.WaitGroup, provider cloudprov
logger.Info("Get flags from infos")
_, ok, err := infoMap.Get("logcollect")
if err != nil {
logger.Error("Getting infos: %v", err)
logger.Error(fmt.Sprintf("Getting infos: %v", err))
return
}
if !ok {
@ -63,27 +63,27 @@ func NewStartTrigger(ctx context.Context, wg *sync.WaitGroup, provider cloudprov
cerdsGetter, err := newCloudCredentialGetter(ctx, provider, infoMap)
if err != nil {
logger.Error("Creating cloud credential getter: %v", err)
logger.Error(fmt.Sprintf("Creating cloud credential getter: %v", err))
return
}
logger.Info("Getting credentials")
creds, err := cerdsGetter.GetOpensearchCredentials(ctx)
if err != nil {
logger.Error("Getting opensearch credentials: %v", err)
logger.Error(fmt.Sprintf("Getting opensearch credentials: %v", err))
return
}
logger.Info(fmt.Sprintf("Getting logstash pipeline template from image %s", versions.LogstashImage))
tmpl, err := getTemplate(ctx, logger, versions.LogstashImage, "/run/logstash/templates/pipeline.conf", "/run/logstash")
if err != nil {
logger.Error("Getting logstash pipeline template: %v", err)
logger.Error(fmt.Sprintf("Getting logstash pipeline template: %v", err))
return
}
infoMapM, err := infoMap.GetCopy()
if err != nil {
logger.Error("Getting copy of map from info: %v", err)
logger.Error(fmt.Sprintf("Getting copy of map from info: %v", err))
return
}
infoMapM = filterInfoMap(infoMapM)
@ -97,14 +97,14 @@ func NewStartTrigger(ctx context.Context, wg *sync.WaitGroup, provider cloudprov
Credentials: creds,
}
if err := writeTemplate("/run/logstash/pipeline/pipeline.conf", tmpl, pipelineConf); err != nil {
logger.Error("Writing logstash config: %v", err)
logger.Error(fmt.Sprintf("Writing logstash config: %v", err))
return
}
logger.Info(fmt.Sprintf("Getting filebeat config template from image %s", versions.FilebeatImage))
tmpl, err = getTemplate(ctx, logger, versions.FilebeatImage, "/run/filebeat/templates/filebeat.yml", "/run/filebeat")
if err != nil {
logger.Error("Getting filebeat config template: %v", err)
logger.Error(fmt.Sprintf("Getting filebeat config template: %v", err))
return
}
filebeatConf := filebeatConfInput{
@ -112,13 +112,13 @@ func NewStartTrigger(ctx context.Context, wg *sync.WaitGroup, provider cloudprov
AddCloudMetadata: true,
}
if err := writeTemplate("/run/filebeat/filebeat.yml", tmpl, filebeatConf); err != nil {
logger.Error("Writing filebeat pipeline: %v", err)
logger.Error(fmt.Sprintf("Writing filebeat pipeline: %v", err))
return
}
logger.Info("Starting log collection pod")
if err := startPod(ctx, logger); err != nil {
logger.Error("Starting log collection: %v", err)
logger.Error(fmt.Sprintf("Starting log collection: %v", err))
}
}()
}

View file

@ -154,13 +154,13 @@ type maliciousJoiner struct {
// join issues a join request to the join service endpoint.
func (j *maliciousJoiner) join(ctx context.Context) (*joinproto.IssueJoinTicketResponse, error) {
j.logger.Debug("Dialing join service endpoint %s", j.endpoint, "")
j.logger.Debug(fmt.Sprintf("Dialing join service endpoint %s", j.endpoint))
conn, err := j.dialer.Dial(ctx, j.endpoint)
if err != nil {
return nil, fmt.Errorf("dialing join service endpoint: %w", err)
}
defer conn.Close()
j.logger.Debug("Successfully dialed join service endpoint %s", j.endpoint, "")
j.logger.Debug(fmt.Sprintf("Successfully dialed join service endpoint %s", j.endpoint))
protoClient := joinproto.NewAPIClient(conn)
@ -171,7 +171,7 @@ func (j *maliciousJoiner) join(ctx context.Context) (*joinproto.IssueJoinTicketR
IsControlPlane: false,
}
res, err := protoClient.IssueJoinTicket(ctx, req)
j.logger.Debug("Got join ticket response: %s", fmt.Sprintf("%+v", res), "")
j.logger.Debug(fmt.Sprintf("Got join ticket response: %+v", res))
if err != nil {
return nil, fmt.Errorf("issuing join ticket: %w", err)
}

View file

@ -121,7 +121,7 @@ func fixBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorUplo
if err != nil {
return iss, err
}
log.Info("Dry run: would save updated file %s with diff:\n%s", bazelFile.RelPath, diff)
log.Info(fmt.Sprintf("Dry run: would save updated file %s with diff:\n%s", bazelFile.RelPath, diff))
return iss, nil
}
log.Info(fmt.Sprintf("Saving updated file: %s", bazelFile.RelPath))
@ -142,7 +142,7 @@ func learnHashForRule(ctx context.Context, mirrorUpload mirrorUploader, rule *bu
return err
}
rules.SetHash(rule, learnedHash)
log.Debug("Learned hash for rule %s: %s", rule.Name(), learnedHash)
log.Debug(fmt.Sprintf("Learned hash for rule %s: %s", rule.Name(), learnedHash))
return nil
}
@ -183,14 +183,14 @@ func fixRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.Rule,
}
if checkErr := mirrorUpload.Check(ctx, expectedHash); checkErr != nil {
log.Info("Artifact %s with hash %s is not yet mirrored. Uploading...", rule.Name(), expectedHash)
log.Info(fmt.Sprintf("Artifact %s with hash %s is not yet mirrored. Uploading...", rule.Name(), expectedHash))
if uploadErr := mirrorUpload.Mirror(ctx, expectedHash, rules.GetURLs(rule)); uploadErr != nil {
// don't try to fix the rule if the upload failed
iss = append(iss, uploadErr)
return changed, iss
}
} else {
log.Info("Artifact %s with hash %s was already uploaded before. Adding to rule...", rule.Name(), expectedHash)
log.Info(fmt.Sprintf("Artifact %s with hash %s was already uploaded before. Adding to rule...", rule.Name(), expectedHash))
}
// now the artifact is mirrored (if it wasn't already) and we can fix the rule

View file

@ -95,17 +95,17 @@ func (m *Maintainer) Mirror(ctx context.Context, hash string, urls []string) err
}
for _, url := range urls {
m.log.Debug("Mirroring file with hash %v from %q", hash, url)
m.log.Debug(fmt.Sprintf("Mirroring file with hash %v from %q", hash, url))
body, err := m.downloadFromUpstream(ctx, url)
if err != nil {
m.log.Debug("Failed to download file from %q: %v", url, err)
m.log.Debug(fmt.Sprintf("Failed to download file from %q: %v", url, err))
continue
}
defer body.Close()
streamedHash := sha256.New()
tee := io.TeeReader(body, streamedHash)
if err := m.put(ctx, hash, tee); err != nil {
m.log.Warn("Failed to stream file from upstream %q to mirror: %v.. Trying next url.", url, err)
m.log.Warn(fmt.Sprintf("Failed to stream file from upstream %q to mirror: %v.. Trying next url.", url, err))
continue
}
actualHash := hex.EncodeToString(streamedHash.Sum(nil))
@ -117,7 +117,7 @@ func (m *Maintainer) Mirror(ctx context.Context, hash string, urls []string) err
if err != nil {
return err
}
m.log.Debug("File uploaded successfully to mirror from %q as %q", url, pubURL)
m.log.Debug(fmt.Sprintf("File uploaded successfully to mirror from %q as %q", url, pubURL))
return nil
}
return fmt.Errorf("failed to download / reupload file with hash %v from any of the urls: %v", hash, urls)
@ -129,16 +129,16 @@ func (m *Maintainer) Learn(ctx context.Context, urls []string) (string, error) {
m.log.Debug(fmt.Sprintf("Learning new hash from %q", url))
body, err := m.downloadFromUpstream(ctx, url)
if err != nil {
m.log.Debug("Failed to download file from %q: %v", url, err)
m.log.Debug(fmt.Sprintf("Failed to download file from %q: %v", url, err))
continue
}
defer body.Close()
streamedHash := sha256.New()
if _, err := io.Copy(streamedHash, body); err != nil {
m.log.Debug("Failed to stream file from %q: %v", url, err)
m.log.Debug(fmt.Sprintf("Failed to stream file from %q: %v", url, err))
}
learnedHash := hex.EncodeToString(streamedHash.Sum(nil))
m.log.Debug("File successfully downloaded from %q with %q", url, learnedHash)
m.log.Debug(fmt.Sprintf("File successfully downloaded from %q with %q", url, learnedHash))
return learnedHash, nil
}
return "", fmt.Errorf("failed to download file / learn hash from any of the urls: %v", urls)
@ -157,7 +157,7 @@ func (m *Maintainer) Check(ctx context.Context, expectedHash string) error {
// It uses the authenticated CAS s3 endpoint to download the file metadata.
func (m *Maintainer) checkAuthenticated(ctx context.Context, expectedHash string) error {
key := path.Join(keyBase, expectedHash)
m.log.Debug("Check: s3 getObjectAttributes {Bucket: %v, Key: %v}", m.bucket, key)
m.log.Debug(fmt.Sprintf("Check: s3 getObjectAttributes {Bucket: %v, Key: %v}", m.bucket, key))
attributes, err := m.objectStorageClient.GetObjectAttributes(ctx, &s3.GetObjectAttributesInput{
Bucket: &m.bucket,
Key: &key,
@ -221,10 +221,10 @@ func (m *Maintainer) put(ctx context.Context, hash string, data io.Reader) error
key := path.Join(keyBase, hash)
if m.dryRun {
m.log.Debug("DryRun: s3 put object {Bucket: %v, Key: %v}", m.bucket, key)
m.log.Debug(fmt.Sprintf("DryRun: s3 put object {Bucket: %v, Key: %v}", m.bucket, key))
return nil
}
m.log.Debug(fmt.Sprintf("Uploading object with hash %v to s3://%v/%v", hash, m.bucket, key))
m.log.Debug(fmt.Sprintf(fmt.Sprintf("Uploading object with hash %v to s3://%v/%v", hash, m.bucket, key)))
_, err := m.uploadClient.Upload(ctx, &s3.PutObjectInput{
Bucket: &m.bucket,
Key: &key,

View file

@ -121,7 +121,7 @@ func upgradeBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirror
if err != nil {
return iss, err
}
log.Info("Dry run: would save updated file %s with diff:\n%s", bazelFile.RelPath, diff)
log.Info(fmt.Sprintf("Dry run: would save updated file %s with diff:\n%s", bazelFile.RelPath, diff))
return iss, nil
}
log.Info(fmt.Sprintf("Saving updated file: %s", bazelFile.RelPath))

View file

@ -10,6 +10,7 @@ package main
import (
"context"
"flag"
"fmt"
"log/slog"
"os"
@ -55,18 +56,18 @@ func main() {
c, cclose, err := versionsapi.NewClient(ctx, "eu-central-1", "cdn-constellation-backend", constants.CDNDefaultDistributionID, false, log)
if err != nil {
log.Error("creating s3 client: %w", err)
log.Error(fmt.Sprintf("creating s3 client: %s", err))
os.Exit(1)
}
defer func() {
if err := cclose(ctx); err != nil {
log.Error("invalidating cache: %w", err)
log.Error(fmt.Sprintf("invalidating cache: %s", err))
os.Exit(1)
}
}()
if err := c.UpdateCLIInfo(ctx, cliInfo); err != nil {
log.Error("updating cli info: %w", err)
log.Error(fmt.Sprintf("updating cli info: %s", err))
os.Exit(1)
}
}

View file

@ -55,7 +55,7 @@ func (s *Server) ListenAndServe(port string) error {
return err
}
s.log.Info("Starting QEMU metadata API on %s", lis.Addr())
s.log.Info(fmt.Sprintf("Starting QEMU metadata API on %s", lis.Addr()))
return server.Serve(lis)
}

View file

@ -1,106 +0,0 @@
/*
Copyright (c) Edgeless Systems GmbH
SPDX-License-Identifier: AGPL-3.0-only
*/
package cmd
import (
"fmt"
"io"
"log/slog"
"os"
"github.com/edgelesssys/constellation/v2/internal/osimage"
"github.com/edgelesssys/constellation/v2/internal/osimage/archive"
awsupload "github.com/edgelesssys/constellation/v2/internal/osimage/aws"
"github.com/spf13/cobra"
)
// newAWSCmd returns the command that uploads an OS image to AWS.
func newAWSCmd() *cobra.Command {
cmd := &cobra.Command{
Use: "aws",
Short: "Upload OS image to AWS",
Long: "Upload OS image to AWS.",
Args: cobra.ExactArgs(0),
RunE: runAWS,
}
cmd.Flags().String("aws-region", "eu-central-1", "AWS region used during AMI creation")
cmd.Flags().String("aws-bucket", "constellation-images", "S3 bucket used during AMI creation")
return cmd
}
func runAWS(cmd *cobra.Command, _ []string) error {
workdir := os.Getenv("BUILD_WORKING_DIRECTORY")
if len(workdir) > 0 {
must(os.Chdir(workdir))
}
flags, err := parseAWSFlags(cmd)
if err != nil {
return err
}
log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel}))
log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
archiveC, archiveCClose, err := archive.New(cmd.Context(), flags.region, flags.bucket, flags.distributionID, log)
if err != nil {
return err
}
defer func() {
if err := archiveCClose(cmd.Context()); err != nil {
log.Error("closing archive client: %v", err)
}
}()
uploadC, err := awsupload.New(flags.awsRegion, flags.awsBucket, log)
if err != nil {
return fmt.Errorf("uploading image: %w", err)
}
file, err := os.Open(flags.rawImage)
if err != nil {
return fmt.Errorf("uploading image: opening image file %w", err)
}
defer file.Close()
size, err := file.Seek(0, io.SeekEnd)
if err != nil {
return err
}
if _, err := file.Seek(0, io.SeekStart); err != nil {
return err
}
out := cmd.OutOrStdout()
if len(flags.out) > 0 {
outF, err := os.Create(flags.out)
if err != nil {
return fmt.Errorf("uploading image: opening output file %w", err)
}
defer outF.Close()
out = outF
}
uploadReq := &osimage.UploadRequest{
Provider: flags.provider,
Version: flags.version,
AttestationVariant: flags.attestationVariant,
SecureBoot: flags.secureBoot,
Size: size,
Timestamp: flags.timestamp,
Image: file,
}
if flags.secureBoot {
sbDatabase, uefiVarStore, err := loadSecureBootKeys(flags.pki)
if err != nil {
return err
}
uploadReq.SBDatabase = sbDatabase
uploadReq.UEFIVarStore = uefiVarStore
}
return uploadImage(cmd.Context(), archiveC, uploadC, uploadReq, out)
}

View file

@ -1,107 +0,0 @@
/*
Copyright (c) Edgeless Systems GmbH
SPDX-License-Identifier: AGPL-3.0-only
*/
package cmd
import (
"fmt"
"io"
"log/slog"
"os"
"github.com/edgelesssys/constellation/v2/internal/osimage"
"github.com/edgelesssys/constellation/v2/internal/osimage/archive"
azureupload "github.com/edgelesssys/constellation/v2/internal/osimage/azure"
"github.com/spf13/cobra"
)
// newAzureCmd returns the command that uploads an OS image to Azure.
func newAzureCmd() *cobra.Command {
cmd := &cobra.Command{
Use: "azure",
Short: "Upload OS image to Azure",
Long: "Upload OS image to Azure.",
Args: cobra.ExactArgs(0),
RunE: runAzure,
}
cmd.Flags().String("az-subscription", "0d202bbb-4fa7-4af8-8125-58c269a05435", "Azure subscription to use")
cmd.Flags().String("az-location", "northeurope", "Azure location to use")
cmd.Flags().String("az-resource-group", "constellation-images", "Azure resource group to use")
return cmd
}
func runAzure(cmd *cobra.Command, _ []string) error {
workdir := os.Getenv("BUILD_WORKING_DIRECTORY")
if len(workdir) > 0 {
must(os.Chdir(workdir))
}
flags, err := parseAzureFlags(cmd)
if err != nil {
return err
}
log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel}))
log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
archiveC, archiveCClose, err := archive.New(cmd.Context(), flags.region, flags.bucket, flags.distributionID, log)
if err != nil {
return err
}
defer func() {
if err := archiveCClose(cmd.Context()); err != nil {
log.Error("closing archive client: %v", err)
}
}()
uploadC, err := azureupload.New(flags.azSubscription, flags.azLocation, flags.azResourceGroup, log)
if err != nil {
return fmt.Errorf("uploading image: %w", err)
}
file, err := os.Open(flags.rawImage)
if err != nil {
return fmt.Errorf("uploading image: opening image file %w", err)
}
defer file.Close()
size, err := file.Seek(0, io.SeekEnd)
if err != nil {
return err
}
if _, err := file.Seek(0, io.SeekStart); err != nil {
return err
}
out := cmd.OutOrStdout()
if len(flags.out) > 0 {
outF, err := os.Create(flags.out)
if err != nil {
return fmt.Errorf("uploading image: opening output file %w", err)
}
defer outF.Close()
out = outF
}
uploadReq := &osimage.UploadRequest{
Provider: flags.provider,
Version: flags.version,
AttestationVariant: flags.attestationVariant,
SecureBoot: flags.secureBoot,
Size: size,
Timestamp: flags.timestamp,
Image: file,
}
if flags.secureBoot {
sbDatabase, uefiVarStore, err := loadSecureBootKeys(flags.pki)
if err != nil {
return err
}
uploadReq.SBDatabase = sbDatabase
uploadReq.UEFIVarStore = uefiVarStore
}
return uploadImage(cmd.Context(), archiveC, uploadC, uploadReq, out)
}

View file

@ -9,7 +9,6 @@ package cmd
import (
"errors"
"log/slog"
"os"
"path/filepath"
"strings"
@ -177,7 +176,7 @@ type uplosiFlags struct {
bucket string
distributionID string
logLevel zapcore.Level
logLevel slog.Level
}
func parseUplosiFlags(cmd *cobra.Command) (uplosiFlags, error) {
@ -272,9 +271,9 @@ func parseUplosiFlags(cmd *cobra.Command) (uplosiFlags, error) {
if err != nil {
return uplosiFlags{}, err
}
logLevel := zapcore.InfoLevel
logLevel := slog.LevelInfo
if verbose {
logLevel = zapcore.DebugLevel
logLevel = slog.LevelDebug
}
return uplosiFlags{

View file

@ -1,107 +0,0 @@
/*
Copyright (c) Edgeless Systems GmbH
SPDX-License-Identifier: AGPL-3.0-only
*/
package cmd
import (
"fmt"
"io"
"log/slog"
"os"
"github.com/edgelesssys/constellation/v2/internal/osimage"
"github.com/edgelesssys/constellation/v2/internal/osimage/archive"
gcpupload "github.com/edgelesssys/constellation/v2/internal/osimage/gcp"
"github.com/spf13/cobra"
)
// newGCPCommand returns the command that uploads an OS image to GCP.
func newGCPCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "gcp",
Short: "Upload OS image to GCP",
Long: "Upload OS image to GCP.",
Args: cobra.ExactArgs(0),
RunE: runGCP,
}
cmd.Flags().String("gcp-project", "constellation-images", "GCP project to use")
cmd.Flags().String("gcp-location", "europe-west3", "GCP location to use")
cmd.Flags().String("gcp-bucket", "constellation-os-images", "GCP bucket to use")
return cmd
}
func runGCP(cmd *cobra.Command, _ []string) error {
workdir := os.Getenv("BUILD_WORKING_DIRECTORY")
if len(workdir) > 0 {
must(os.Chdir(workdir))
}
flags, err := parseGCPFlags(cmd)
if err != nil {
return err
}
log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel}))
log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
archiveC, archiveCClose, err := archive.New(cmd.Context(), flags.region, flags.bucket, flags.distributionID, log)
if err != nil {
return err
}
defer func() {
if err := archiveCClose(cmd.Context()); err != nil {
log.Error("closing archive client: %v", err)
}
}()
uploadC, err := gcpupload.New(cmd.Context(), flags.gcpProject, flags.gcpLocation, flags.gcpBucket, log)
if err != nil {
return fmt.Errorf("uploading image: %w", err)
}
file, err := os.Open(flags.rawImage)
if err != nil {
return fmt.Errorf("uploading image: opening image file %w", err)
}
defer file.Close()
size, err := file.Seek(0, io.SeekEnd)
if err != nil {
return err
}
if _, err := file.Seek(0, io.SeekStart); err != nil {
return err
}
out := cmd.OutOrStdout()
if len(flags.out) > 0 {
outF, err := os.Create(flags.out)
if err != nil {
return fmt.Errorf("uploading image: opening output file %w", err)
}
defer outF.Close()
out = outF
}
uploadReq := &osimage.UploadRequest{
Provider: flags.provider,
Version: flags.version,
AttestationVariant: flags.attestationVariant,
SecureBoot: flags.secureBoot,
Size: size,
Timestamp: flags.timestamp,
Image: file,
}
if flags.secureBoot {
sbDatabase, uefiVarStore, err := loadSecureBootKeys(flags.pki)
if err != nil {
return err
}
uploadReq.SBDatabase = sbDatabase
uploadReq.UEFIVarStore = uefiVarStore
}
return uploadImage(cmd.Context(), archiveC, uploadC, uploadReq, out)
}

View file

@ -62,7 +62,7 @@ func runInfo(cmd *cobra.Command, args []string) error {
}
defer func() {
if err := uploadCClose(cmd.Context()); err != nil {
log.Error("closing upload client: %v", err)
log.Error(fmt.Sprintf("closing upload client: %v", err))
}
}()

View file

@ -80,6 +80,6 @@ func runMeasurementsUpload(cmd *cobra.Command, _ []string) error {
if err != nil {
return fmt.Errorf("uploading image info: %w", err)
}
log.Info("Uploaded image measurements to %s (and signature to %s)", measurementsURL, signatureURL)
log.Info(fmt.Sprintf("Uploaded image measurements to %s (and signature to %s)", measurementsURL, signatureURL))
return nil
}

View file

@ -1,90 +0,0 @@
/*
Copyright (c) Edgeless Systems GmbH
SPDX-License-Identifier: AGPL-3.0-only
*/
package cmd
import (
"fmt"
"io"
"log/slog"
"os"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
"github.com/edgelesssys/constellation/v2/internal/osimage"
"github.com/edgelesssys/constellation/v2/internal/osimage/archive"
nopupload "github.com/edgelesssys/constellation/v2/internal/osimage/nop"
"github.com/spf13/cobra"
)
func runNOP(cmd *cobra.Command, provider cloudprovider.Provider, _ []string) error {
workdir := os.Getenv("BUILD_WORKING_DIRECTORY")
if len(workdir) > 0 {
must(os.Chdir(workdir))
}
flags, err := parseCommonFlags(cmd)
if err != nil {
return err
}
flags.provider = provider
log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel}))
log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
archiveC, archiveCClose, err := archive.New(cmd.Context(), flags.region, flags.bucket, flags.distributionID, log)
if err != nil {
return err
}
defer func() {
if err := archiveCClose(cmd.Context()); err != nil {
log.Error("closing archive client: %v", err)
}
}()
uploadC := nopupload.New(log)
file, err := os.Open(flags.rawImage)
if err != nil {
return fmt.Errorf("uploading image: opening image file %w", err)
}
defer file.Close()
size, err := file.Seek(0, io.SeekEnd)
if err != nil {
return err
}
if _, err := file.Seek(0, io.SeekStart); err != nil {
return err
}
out := cmd.OutOrStdout()
if len(flags.out) > 0 {
outF, err := os.Create(flags.out)
if err != nil {
return fmt.Errorf("uploading image: opening output file %w", err)
}
defer outF.Close()
out = outF
}
uploadReq := &osimage.UploadRequest{
Provider: flags.provider,
Version: flags.version,
AttestationVariant: flags.attestationVariant,
SecureBoot: flags.secureBoot,
Size: size,
Timestamp: flags.timestamp,
Image: file,
}
if flags.secureBoot {
sbDatabase, uefiVarStore, err := loadSecureBootKeys(flags.pki)
if err != nil {
return err
}
uploadReq.SBDatabase = sbDatabase
uploadReq.UEFIVarStore = uefiVarStore
}
return uploadImage(cmd.Context(), archiveC, uploadC, uploadReq, out)
}

View file

@ -9,12 +9,12 @@ package cmd
import (
"fmt"
"io"
"log/slog"
"os"
"strconv"
"time"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/edgelesssys/constellation/v2/internal/osimage"
"github.com/edgelesssys/constellation/v2/internal/osimage/archive"
nopupload "github.com/edgelesssys/constellation/v2/internal/osimage/nop"
@ -58,8 +58,8 @@ func runUplosi(cmd *cobra.Command, _ []string) error {
if err != nil {
return err
}
log := logger.New(logger.PlainLog, flags.logLevel)
log.Debugf("Parsed flags: %+v", flags)
log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel}))
log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
archiveC, archiveCClose, err := archive.New(cmd.Context(), flags.region, flags.bucket, flags.distributionID, log)
if err != nil {
@ -67,7 +67,7 @@ func runUplosi(cmd *cobra.Command, _ []string) error {
}
defer func() {
if err := archiveCClose(cmd.Context()); err != nil {
log.Errorf("closing archive client: %v", err)
log.Error(fmt.Sprintf("closing archive client: %v", err))
}
}()

View file

@ -197,7 +197,7 @@ func Fetch[T APIObject](ctx context.Context, c *Client, obj T) (T, error) {
Key: ptr(obj.JSONPath()),
}
c.Logger.Debug("Fetching %T from s3: %s", obj, obj.JSONPath())
c.Logger.Debug(fmt.Sprintf("Fetching %T from s3: %s", obj, obj.JSONPath()))
out, err := c.s3Client.GetObject(ctx, in)
var noSuchkey *s3types.NoSuchKey
if errors.As(err, &noSuchkey) {
@ -243,7 +243,7 @@ func Update(ctx context.Context, c *Client, obj APIObject) error {
c.dirtyPaths = append(c.dirtyPaths, "/"+obj.JSONPath())
c.Logger.Debug("Uploading %T to s3: %v", obj, obj.JSONPath())
c.Logger.Debug(fmt.Sprintf("Uploading %T to s3: %v", obj, obj.JSONPath()))
if _, err := c.Upload(ctx, in); err != nil {
return fmt.Errorf("uploading %T: %w", obj, err)
}
@ -306,7 +306,7 @@ func Delete(ctx context.Context, c *Client, obj APIObject) error {
Key: ptr(obj.JSONPath()),
}
c.Logger.Debug("Deleting %T from s3: %s", obj, obj.JSONPath())
c.Logger.Debug(fmt.Sprintf("Deleting %T from s3: %s", obj, obj.JSONPath()))
if _, err := c.DeleteObject(ctx, in); err != nil {
return fmt.Errorf("deleting s3 object at %s: %w", obj.JSONPath(), err)
}

View file

@ -112,24 +112,24 @@ func ensureVersion(ctx context.Context, client *versionsapi.Client, kind version
verList, err := client.FetchVersionList(ctx, verListReq)
var notFoundErr *apiclient.NotFoundError
if errors.As(err, &notFoundErr) {
log.Info("Version list for %s versions under %q does not exist. Creating new list", gran.String(), ver.Major())
log.Info(fmt.Sprintf("Version list for %s versions under %q does not exist. Creating new list", gran.String(), ver.Major()))
verList = verListReq
} else if err != nil {
return fmt.Errorf("failed to list minor versions: %w", err)
}
log.Debug("%s version list: %v", gran.String(), verList)
log.Debug(fmt.Sprintf("%s version list: %v", gran.String(), verList))
insertGran := gran + 1
insertVersion := ver.WithGranularity(insertGran)
if verList.Contains(insertVersion) {
log.Info("Version %q already exists in list %v", insertVersion, verList.Versions)
log.Info(fmt.Sprintf("Version %q already exists in list %v", insertVersion, verList.Versions))
return nil
}
log.Info("Inserting %s version %q into list", insertGran.String(), insertVersion)
log.Info(fmt.Sprintf("Inserting %s version %q into list", insertGran.String(), insertVersion))
verList.Versions = append(verList.Versions, insertVersion)
log.Debug("New %s version list: %v", gran.String(), verList)
log.Debug(fmt.Sprintf("New %s version list: %v", gran.String(), verList))
if err := client.UpdateVersionList(ctx, verList); err != nil {
return fmt.Errorf("failed to add %s version: %w", gran.String(), err)
@ -148,7 +148,7 @@ func updateLatest(ctx context.Context, client *versionsapi.Client, kind versions
latest, err := client.FetchVersionLatest(ctx, latest)
var notFoundErr *apiclient.NotFoundError
if errors.As(err, &notFoundErr) {
log.Debug("Latest version for ref %q and stream %q not found", ver.Ref(), ver.Stream())
log.Debug(fmt.Sprintf("Latest version for ref %q and stream %q not found", ver.Ref(), ver.Stream()))
} else if err != nil {
return fmt.Errorf("fetching latest version: %w", err)
}

View file

@ -71,7 +71,7 @@ func runList(cmd *cobra.Command, _ []string) (retErr error) {
minorVersions, err = listMinorVersions(cmd.Context(), client, flags.ref, flags.stream)
var errNotFound *apiclient.NotFoundError
if err != nil && errors.As(err, &errNotFound) {
log.Info("No minor versions found for ref %q and stream %q.", flags.ref, flags.stream)
log.Info(fmt.Sprintf("No minor versions found for ref %q and stream %q.", flags.ref, flags.stream))
return nil
} else if err != nil {
return err

View file

@ -410,7 +410,7 @@ func (a *awsClient) deleteImage(ctx context.Context, ami string, region string,
snapshotID, err := a.getSnapshotID(ctx, ami, log)
if err != nil {
log.Warn("Failed to get AWS snapshot ID for image %s: %v", ami, err)
log.Warn(fmt.Sprintf("Failed to get AWS snapshot ID for image %s: %v", ami, err))
}
if err := a.deregisterImage(ctx, ami, dryrun, log); err != nil {
@ -686,10 +686,10 @@ type azImage struct {
func (a *azureClient) parseImage(ctx context.Context, image string, log *slog.Logger) (azImage, error) {
if m := azImageRegex.FindStringSubmatch(image); len(m) == 5 {
log.Debug(
log.Debug(fmt.Sprintf(
"Image matches local image format, resource group: %s, gallery: %s, image definition: %s, version: %s",
m[1], m[2], m[3], m[4],
)
))
return azImage{
resourceGroup: m[1],
gallery: m[2],

View file

@ -171,7 +171,7 @@ func (c *Client) deleteVersionFromMinorVersionList(ctx context.Context, ver Vers
}
if !minorList.Contains(ver.version) {
c.Client.Logger.Warn("Version %s is not in minor version list %s", ver.version, minorList.JSONPath())
c.Client.Logger.Warn(fmt.Sprintf("Version %s is not in minor version list %s", ver.version, minorList.JSONPath()))
c.Client.Logger.Warn("Skipping update of minor version list")
return nil, nil
}
@ -242,7 +242,7 @@ func (c *Client) deleteVersionFromLatest(ctx context.Context, ver Version, possi
return nil
}
c.Client.Logger.Info("Updating latest version from %s to %s", latest.Version, possibleNewLatest.Version)
c.Client.Logger.Info(fmt.Sprintf("Updating latest version from %s to %s", latest.Version, possibleNewLatest.Version))
if err := c.UpdateVersionLatest(ctx, *possibleNewLatest); err != nil {
return fmt.Errorf("updating latest version: %w", err)
}

View file

@ -212,18 +212,18 @@ func (v *Validator) checkIDKeyDigest(ctx context.Context, report *spb.Attestatio
// the MAA if necessary.
switch v.config.FirmwareSignerConfig.EnforcementPolicy {
case idkeydigest.MAAFallback:
v.log.Info(
v.log.Info(fmt.Sprintf(
"Configured idkeydigests %x don't contain reported idkeydigest %x, falling back to MAA validation",
v.config.FirmwareSignerConfig.AcceptedKeyDigests,
report.Report.IdKeyDigest,
)
))
return v.maa.validateToken(ctx, v.config.FirmwareSignerConfig.MAAURL, maaToken, extraData)
case idkeydigest.WarnOnly:
v.log.Warn(
v.log.Warn(fmt.Sprintf(
"Configured idkeydigests %x don't contain reported idkeydigest %x",
v.config.FirmwareSignerConfig.AcceptedKeyDigests,
report.Report.IdKeyDigest,
)
))
default:
return fmt.Errorf(
"configured idkeydigests %x don't contain reported idkeydigest %x",

View file

@ -57,7 +57,7 @@ func (a *InstanceInfo) addReportSigner(att *spb.Attestation, report *spb.Report,
// If the VCEK certificate is present, parse it and format it.
reportSigner, err := a.ParseReportSigner()
if err != nil {
logger.Warn("Error parsing report signer: %v", err)
logger.Warn(fmt.Sprintf("Error parsing report signer: %v", err))
}
signerInfo, err := abi.ParseSignerInfo(report.GetSignerInfo())
@ -123,7 +123,7 @@ func (a *InstanceInfo) AttestationWithCerts(getter trust.HTTPSGetter,
// If the certificate chain from THIM is present, parse it and format it.
ask, ark, err := a.ParseCertChain()
if err != nil {
logger.Warn("Error parsing certificate chain: %v", err)
logger.Warn(fmt.Sprintf("Error parsing certificate chain: %v", err))
}
if ask != nil {
logger.Info("Using ASK certificate from Azure THIM")
@ -140,16 +140,16 @@ func (a *InstanceInfo) AttestationWithCerts(getter trust.HTTPSGetter,
att.CertificateChain.AskCert = fallbackCerts.ask.Raw
}
if att.CertificateChain.ArkCert == nil && fallbackCerts.ark != nil {
logger.Info("Using ARK certificate from %s", constants.ConfigFilename)
logger.Info(fmt.Sprintf("Using ARK certificate from %s", constants.ConfigFilename))
att.CertificateChain.ArkCert = fallbackCerts.ark.Raw
}
// Otherwise, retrieve it from AMD KDS.
if att.CertificateChain.AskCert == nil || att.CertificateChain.ArkCert == nil {
logger.Info(
logger.Info(fmt.Sprintf(
"Certificate chain not fully present (ARK present: %t, ASK present: %t), falling back to retrieving it from AMD KDS",
(att.CertificateChain.ArkCert != nil),
(att.CertificateChain.AskCert != nil),
)
))
kdsCertChain, err := trust.GetProductChain(productName, signingInfo, getter)
if err != nil {
return nil, fmt.Errorf("retrieving certificate chain from AMD KDS: %w", err)

View file

@ -40,7 +40,7 @@ func (i *Issuer) Issue(_ context.Context, userData []byte, nonce []byte) (attDoc
i.log.Info("Issuing attestation statement")
defer func() {
if err != nil {
i.log.Warn("Failed to issue attestation document: %s", err)
i.log.Warn(fmt.Sprintf("Failed to issue attestation document: %s", err))
}
}()

View file

@ -52,7 +52,7 @@ func (v *Validator) Validate(ctx context.Context, attDocRaw []byte, nonce []byte
v.log.Info("Validating attestation document")
defer func() {
if err != nil {
v.log.Warn("Failed to validate attestation document: %s", err)
v.log.Warn(fmt.Sprintf("Failed to validate attestation document: %s", err))
}
}()

View file

@ -106,7 +106,7 @@ func (i *Issuer) Issue(ctx context.Context, userData []byte, nonce []byte) (res
i.log.Info("Issuing attestation statement")
defer func() {
if err != nil {
i.log.Warn("Failed to issue attestation statement: %s", err)
i.log.Warn(fmt.Sprintf("Failed to issue attestation statement: %s", err))
}
}()
@ -180,7 +180,7 @@ func (v *Validator) Validate(ctx context.Context, attDocRaw []byte, nonce []byte
v.log.Info("Validating attestation document")
defer func() {
if err != nil {
v.log.Warn("Failed to validate attestation document: %s", err)
v.log.Warn(fmt.Sprintf("Failed to validate attestation document: %s", err))
}
}()

View file

@ -87,7 +87,7 @@ func (a *Applier) SetKubeConfig(kubeConfig []byte) error {
// CheckLicense checks the given Constellation license with the license server
// and returns the allowed quota for the license.
func (a *Applier) CheckLicense(ctx context.Context, csp cloudprovider.Provider, initRequest bool, licenseID string) (int, error) {
a.log.Debug("Contacting license server for license '%s'", licenseID)
a.log.Debug(fmt.Sprintf("Contacting license server for license '%s'", licenseID))
var action license.Action
if initRequest {
@ -103,7 +103,7 @@ func (a *Applier) CheckLicense(ctx context.Context, csp cloudprovider.Provider,
if err != nil {
return 0, fmt.Errorf("checking license: %w", err)
}
a.log.Debug("Got response from license server for license '%s'", licenseID)
a.log.Debug(fmt.Sprintf("Got response from license server for license '%s'", licenseID))
return quota, nil
}

View file

@ -85,12 +85,12 @@ func (a *Applier) Init(
// Create a wrapper function that allows logging any returned error from the retrier before checking if it's the expected retriable one.
serviceIsUnavailable := func(err error) bool {
isServiceUnavailable := grpcRetry.ServiceIsUnavailable(err)
a.log.Debug("Encountered error (retriable: %t): %s", isServiceUnavailable, err)
a.log.Debug(fmt.Sprintf("Encountered error (retriable: %t): %s", isServiceUnavailable, err))
return isServiceUnavailable
}
// Perform the RPC
a.log.Debug("Making initialization call, doer is %+v", doer)
a.log.Debug(fmt.Sprintf("Making initialization call, doer is %+v", doer))
a.spinner.Start("Connecting ", false)
retrier := retry.NewIntervalRetrier(doer, 30*time.Second, serviceIsUnavailable)
if err := retrier.Do(ctx); err != nil {
@ -99,7 +99,7 @@ func (a *Applier) Init(
a.spinner.Stop()
a.log.Debug("Initialization request finished")
a.log.Debug("Rewriting cluster server address in kubeconfig to %s", state.Infrastructure.ClusterEndpoint)
a.log.Debug(fmt.Sprintf("Rewriting cluster server address in kubeconfig to %s", state.Infrastructure.ClusterEndpoint))
kubeconfig, err := clientcmd.Load(doer.resp.Kubeconfig)
if err != nil {
return InitOutput{}, fmt.Errorf("loading kubeconfig: %w", err)
@ -175,7 +175,7 @@ func (d *initDoer) Do(ctx context.Context) error {
conn, err := d.dialer.Dial(ctx, d.endpoint)
if err != nil {
d.log.Debug("Dialing init server failed: %s. Retrying...", err)
d.log.Debug(fmt.Sprintf("Dialing init server failed: %s. Retrying...", err))
return fmt.Errorf("dialing init server: %w", err)
}
defer conn.Close()
@ -200,7 +200,7 @@ func (d *initDoer) Do(ctx context.Context) error {
res, err := resp.Recv() // get first response, either success or failure
if err != nil {
if e := d.getLogs(resp); e != nil {
d.log.Debug("Failed to collect logs: %s", e)
d.log.Debug(fmt.Sprintf("Failed to collect logs: %s", e))
return &NonRetriableInitError{
LogCollectionErr: e,
Err: err,
@ -214,7 +214,7 @@ func (d *initDoer) Do(ctx context.Context) error {
d.resp = res.GetInitSuccess()
case *initproto.InitResponse_InitFailure:
if e := d.getLogs(resp); e != nil {
d.log.Debug("Failed to get logs from cluster: %s", e)
d.log.Debug(fmt.Sprintf("Failed to get logs from cluster: %s", e))
return &NonRetriableInitError{
LogCollectionErr: e,
Err: errors.New(res.GetInitFailure().GetError()),
@ -225,7 +225,7 @@ func (d *initDoer) Do(ctx context.Context) error {
d.log.Debug("Cluster returned nil response type")
err = errors.New("empty response from cluster")
if e := d.getLogs(resp); e != nil {
d.log.Debug("Failed to collect logs: %s", e)
d.log.Debug(fmt.Sprintf("Failed to collect logs: %s", e))
return &NonRetriableInitError{
LogCollectionErr: e,
Err: err,
@ -236,7 +236,7 @@ func (d *initDoer) Do(ctx context.Context) error {
d.log.Debug("Cluster returned unknown response type")
err = errors.New("unknown response from cluster")
if e := d.getLogs(resp); e != nil {
d.log.Debug("Failed to collect logs: %s", e)
d.log.Debug(fmt.Sprintf("Failed to collect logs: %s", e))
return &NonRetriableInitError{
LogCollectionErr: e,
Err: err,

View file

@ -90,15 +90,15 @@ func (a actionFactory) appendNewAction(
)
}
a.log.Debug("release %s not found, adding to new releases...", release.releaseName)
a.log.Debug(fmt.Sprintf("release %s not found, adding to new releases...", release.releaseName))
*actions = append(*actions, a.newInstall(release, timeout))
return nil
}
if err != nil {
return fmt.Errorf("getting version for %s: %w", release.releaseName, err)
}
a.log.Debug("Current %s version: %s", release.releaseName, currentVersion)
a.log.Debug("New %s version: %s", release.releaseName, newVersion)
a.log.Debug(fmt.Sprintf("Current %s version: %s", release.releaseName, currentVersion))
a.log.Debug(fmt.Sprintf("New %s version: %s", release.releaseName, newVersion))
if !force {
// For charts we package ourselves, the version is equal to the CLI version (charts are embedded in the binary).
@ -132,7 +132,7 @@ func (a actionFactory) appendNewAction(
release.releaseName == certManagerInfo.releaseName {
return ErrConfirmationMissing
}
a.log.Debug("Upgrading %s from %s to %s", release.releaseName, currentVersion, newVersion)
a.log.Debug(fmt.Sprintf("Upgrading %s from %s to %s", release.releaseName, currentVersion, newVersion))
*actions = append(*actions, a.newUpgrade(release, timeout))
return nil
}
@ -162,7 +162,7 @@ func (a actionFactory) updateCRDs(ctx context.Context, chart *chart.Chart) error
for _, dep := range chart.Dependencies() {
for _, crdFile := range dep.Files {
if strings.HasPrefix(crdFile.Name, "crds/") {
a.log.Debug("Updating crd: %s", crdFile.Name)
a.log.Debug(fmt.Sprintf("Updating crd: %s", crdFile.Name))
err := a.kubeClient.ApplyCRD(ctx, crdFile.Data)
if err != nil {
return err

View file

@ -133,7 +133,7 @@ type ChartApplyExecutor struct {
// Apply applies the charts in order.
func (c ChartApplyExecutor) Apply(ctx context.Context) error {
for _, action := range c.actions {
c.log.Debug("Applying %q", action.ReleaseName())
c.log.Debug(fmt.Sprintf("Applying %q", action.ReleaseName()))
if err := action.Apply(ctx); err != nil {
return fmt.Errorf("applying %s: %w", action.ReleaseName(), err)
}

View file

@ -49,7 +49,7 @@ func retryApply(ctx context.Context, action retrieableApplier, retryInterval tim
return fmt.Errorf("helm install: %w", err)
}
retryLoopFinishDuration := time.Since(retryLoopStartTime)
log.Debug("Helm chart %q installation finished after %s", action.ReleaseName(), retryLoopFinishDuration)
log.Debug(fmt.Sprintf("Helm chart %q installation finished after %s", action.ReleaseName(), retryLoopFinishDuration))
return nil
}
@ -61,9 +61,9 @@ type applyDoer struct {
// Do tries to apply the action.
func (i applyDoer) Do(ctx context.Context) error {
i.log.Debug("Trying to apply Helm chart %s", i.applier.ReleaseName())
i.log.Debug(fmt.Sprintf("Trying to apply Helm chart %s", i.applier.ReleaseName()))
if err := i.applier.apply(ctx); err != nil {
i.log.Debug("Helm chart installation %s failed: %v", i.applier.ReleaseName(), err)
i.log.Debug(fmt.Sprintf("Helm chart installation %s failed: %v", i.applier.ReleaseName(), err))
return err
}

View file

@ -39,7 +39,7 @@ func (k *KubeCmd) BackupCRDs(ctx context.Context, fileHandler file.Handler, upgr
for i := range crds {
path := filepath.Join(crdBackupFolder, crds[i].Name+".yaml")
k.log.Debug("Creating CRD backup: %s", path)
k.log.Debug(fmt.Sprintf("Creating CRD backup: %s", path))
// We have to manually set kind/apiversion because of a long-standing limitation of the API:
// https://github.com/kubernetes/kubernetes/issues/3030#issuecomment-67543738
@ -64,7 +64,7 @@ func (k *KubeCmd) BackupCRDs(ctx context.Context, fileHandler file.Handler, upgr
func (k *KubeCmd) BackupCRs(ctx context.Context, fileHandler file.Handler, crds []apiextensionsv1.CustomResourceDefinition, upgradeDir string) error {
k.log.Debug("Starting CR backup")
for _, crd := range crds {
k.log.Debug("Creating backup for resource type: %s", crd.Name)
k.log.Debug(fmt.Sprintf("Creating backup for resource type: %s", crd.Name))
// Iterate over all versions of the CRD
// TODO(daniel-weisse): Consider iterating over crd.Status.StoredVersions instead
@ -72,7 +72,7 @@ func (k *KubeCmd) BackupCRs(ctx context.Context, fileHandler file.Handler, crds
// a version that is not installed in the cluster.
// With the StoredVersions field, we could only iterate over the installed versions.
for _, version := range crd.Spec.Versions {
k.log.Debug("Creating backup of CRs for %q at version %q", crd.Name, version.Name)
k.log.Debug(fmt.Sprintf("Creating backup of CRs for %q at version %q", crd.Name, version.Name))
gvr := schema.GroupVersionResource{Group: crd.Spec.Group, Version: version.Name, Resource: crd.Spec.Names.Plural}
crs, err := k.kubectl.ListCRs(ctx, gvr)
@ -80,7 +80,7 @@ func (k *KubeCmd) BackupCRs(ctx context.Context, fileHandler file.Handler, crds
if !k8serrors.IsNotFound(err) {
return fmt.Errorf("retrieving CR %s: %w", crd.Name, err)
}
k.log.Debug("No CRs found for %q at version %q, skipping...", crd.Name, version.Name)
k.log.Debug(fmt.Sprintf("No CRs found for %q at version %q, skipping...", crd.Name, version.Name))
continue
}
@ -101,7 +101,7 @@ func (k *KubeCmd) BackupCRs(ctx context.Context, fileHandler file.Handler, crds
}
}
k.log.Debug("Backup for resource type %q complete", crd.Name)
k.log.Debug(fmt.Sprintf("Backup for resource type %q complete", crd.Name))
}
k.log.Debug("CR backup complete")
return nil

View file

@ -214,13 +214,13 @@ func (k *KubeCmd) ApplyJoinConfig(ctx context.Context, newAttestConfig config.At
return fmt.Errorf("getting %s ConfigMap: %w", constants.JoinConfigMap, err)
}
k.log.Debug("ConfigMap %q does not exist in namespace %q, creating it now", constants.JoinConfigMap, constants.ConstellationNamespace)
k.log.Debug(fmt.Sprintf("ConfigMap %q does not exist in namespace %q, creating it now", constants.JoinConfigMap, constants.ConstellationNamespace))
if err := retryAction(ctx, k.retryInterval, maxRetryAttempts, func(ctx context.Context) error {
return k.kubectl.CreateConfigMap(ctx, joinConfigMap(newConfigJSON, measurementSalt))
}, k.log); err != nil {
return fmt.Errorf("creating join-config ConfigMap: %w", err)
}
k.log.Debug("Created %q ConfigMap in namespace %q", constants.JoinConfigMap, constants.ConstellationNamespace)
k.log.Debug(fmt.Sprintf("Created %q ConfigMap in namespace %q", constants.JoinConfigMap, constants.ConstellationNamespace))
return nil
}
@ -266,7 +266,7 @@ func (k *KubeCmd) ExtendClusterConfigCertSANs(ctx context.Context, alternativeNa
k.log.Debug("No new SANs to add to the cluster's apiserver SAN field")
return nil
}
k.log.Debug("Extending the cluster's apiserver SAN field with the following SANs: %s\n", strings.Join(missingSANs, ", "))
k.log.Debug(fmt.Sprintf("Extending the cluster's apiserver SAN field with the following SANs: %s\n", strings.Join(missingSANs, ", ")))
clusterConfiguration.APIServer.CertSANs = append(clusterConfiguration.APIServer.CertSANs, missingSANs...)
sort.Strings(clusterConfiguration.APIServer.CertSANs)
@ -409,7 +409,7 @@ func (k *KubeCmd) prepareUpdateK8s(nodeVersion *updatev1alpha1.NodeVersion, newC
}
}
k.log.Debug("Updating local copy of nodeVersion Kubernetes version from %s to %s", nodeVersion.Spec.KubernetesClusterVersion, newClusterVersion)
k.log.Debug(fmt.Sprintf("Updating local copy of nodeVersion Kubernetes version from %s to %s", nodeVersion.Spec.KubernetesClusterVersion, newClusterVersion))
nodeVersion.Spec.KubernetesComponentsReference = configMap.ObjectMeta.Name
nodeVersion.Spec.KubernetesClusterVersion = newClusterVersion
@ -461,7 +461,7 @@ func retryGetJoinConfig(ctx context.Context, kubectl kubectlInterface, retryInte
return false
}
retries++
log.Debug("Getting join-config ConfigMap failed (attempt %d/%d): %s", retries, maxRetryAttempts, err)
log.Debug(fmt.Sprintf("Getting join-config ConfigMap failed (attempt %d/%d): %s", retries, maxRetryAttempts, err))
return retries < maxRetryAttempts
}
@ -483,7 +483,7 @@ func retryAction(ctx context.Context, retryInterval time.Duration, maxRetries in
ctr := 0
retrier := conretry.NewIntervalRetrier(&kubeDoer{action: action}, retryInterval, func(err error) bool {
ctr++
log.Debug("Action failed (attempt %d/%d): %s", ctr, maxRetries, err)
log.Debug(fmt.Sprintf("Action failed (attempt %d/%d): %s", ctr, maxRetries, err))
return ctr < maxRetries
})
return retrier.Do(ctx)

View file

@ -9,6 +9,7 @@ package grpclog
import (
"context"
"fmt"
"sync"
"google.golang.org/grpc/connectivity"
@ -30,15 +31,15 @@ func LogStateChangesUntilReady(ctx context.Context, conn getStater, log debugLog
go func() {
defer wg.Done()
state := conn.GetState()
log.Debug("Connection state started as %s", state)
log.Debug(fmt.Sprintf("Connection state started as %s", state))
for ; state != connectivity.Ready && conn.WaitForStateChange(ctx, state); state = conn.GetState() {
log.Debug("Connection state changed to %s", state)
log.Debug(fmt.Sprintf("Connection state changed to %s", state))
}
if state == connectivity.Ready {
log.Debug("Connection ready")
isReadyCallback()
} else {
log.Debug("Connection state ended with %s", state)
log.Debug(fmt.Sprintf("Connection state ended with %s", state))
}
}()
}

View file

@ -1,603 +0,0 @@
/*
Copyright (c) Edgeless Systems GmbH
SPDX-License-Identifier: AGPL-3.0-only
*/
// package aws implements uploading os images to aws.
package aws
import (
"context"
"errors"
"fmt"
"io"
"log/slog"
"time"
awsconfig "github.com/aws/aws-sdk-go-v2/config"
s3manager "github.com/aws/aws-sdk-go-v2/feature/s3/manager"
"github.com/aws/aws-sdk-go-v2/service/ec2"
ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
s3types "github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/aws/smithy-go"
"github.com/edgelesssys/constellation/v2/internal/api/versionsapi"
"github.com/edgelesssys/constellation/v2/internal/osimage"
"github.com/edgelesssys/constellation/v2/internal/osimage/secureboot"
)
// Uploader can upload and remove os images on AWS.
// (NOTE(review): the comment previously said "GCP", but this type lives in
// the aws package and only talks to EC2/S3 — corrected.)
type Uploader struct {
	region     string // primary region the image is first uploaded to
	bucketName string // S3 bucket holding the temporary raw image blob
	// Per-region client factories; declared as functions so a fresh client
	// can be constructed for each region (and stubbed where needed).
	ec2        func(ctx context.Context, region string) (ec2API, error)
	s3         func(ctx context.Context, region string) (s3API, error)
	s3uploader func(ctx context.Context, region string) (s3UploaderAPI, error)
	log        *slog.Logger
}
// New creates a new Uploader.
// The returned Uploader lazily builds region-scoped EC2/S3 clients from the
// default AWS configuration whenever one of its factories is invoked.
func New(region, bucketName string, log *slog.Logger) (*Uploader, error) {
	// Each factory loads the default AWS config for the requested region and
	// wraps it in the matching client type.
	newEC2 := func(ctx context.Context, region string) (ec2API, error) {
		cfg, err := awsconfig.LoadDefaultConfig(ctx, awsconfig.WithRegion(region))
		if err != nil {
			return nil, err
		}
		return ec2.NewFromConfig(cfg), nil
	}
	newS3 := func(ctx context.Context, region string) (s3API, error) {
		cfg, err := awsconfig.LoadDefaultConfig(ctx, awsconfig.WithRegion(region))
		if err != nil {
			return nil, err
		}
		return s3.NewFromConfig(cfg), nil
	}
	newS3Uploader := func(ctx context.Context, region string) (s3UploaderAPI, error) {
		cfg, err := awsconfig.LoadDefaultConfig(ctx, awsconfig.WithRegion(region))
		if err != nil {
			return nil, err
		}
		return s3manager.NewUploader(s3.NewFromConfig(cfg)), nil
	}
	return &Uploader{
		region:     region,
		bucketName: bucketName,
		ec2:        newEC2,
		s3:         newS3,
		s3uploader: newS3Uploader,
		log:        log,
	}, nil
}
// Upload uploads an OS image to AWS.
//
// The flow is: upload the raw image to S3, import it as an EBS snapshot,
// register an AMI in the primary region, replicate that AMI to all
// replication regions, then wait for, tag and (optionally) publish every AMI.
// It returns one image info entry per region. The temporary S3 blob is
// removed again on exit.
func (u *Uploader) Upload(ctx context.Context, req *osimage.UploadRequest) ([]versionsapi.ImageInfoEntry, error) {
	blobName := fmt.Sprintf("image-%s-%s-%d.raw", req.Version.Stream(), req.Version.Version(), req.Timestamp.Unix())
	imageName := imageName(req.Version, req.AttestationVariant, req.Timestamp)
	allRegions := []string{u.region}
	allRegions = append(allRegions, replicationRegions...)
	// TODO(malt3): make this configurable
	publish := true
	amiIDs := make(map[string]string, len(allRegions))
	if err := u.ensureBucket(ctx); err != nil {
		return nil, fmt.Errorf("ensuring bucket %s exists: %w", u.bucketName, err)
	}

	// pre-cleaning: remove leftovers of a previous upload under the same name
	for _, region := range allRegions {
		if err := u.ensureImageDeleted(ctx, imageName, region); err != nil {
			return nil, fmt.Errorf("pre-cleaning: ensuring no image under the name %s in region %s: %w", imageName, region, err)
		}
	}
	if err := u.ensureSnapshotDeleted(ctx, imageName, u.region); err != nil {
		return nil, fmt.Errorf("pre-cleaning: ensuring no snapshot using the same name exists: %w", err)
	}
	if err := u.ensureBlobDeleted(ctx, blobName); err != nil {
		return nil, fmt.Errorf("pre-cleaning: ensuring no blob using the same name exists: %w", err)
	}

	// create primary image
	if err := u.uploadBlob(ctx, blobName, req.Image); err != nil {
		return nil, fmt.Errorf("uploading image to s3: %w", err)
	}
	defer func() {
		if err := u.ensureBlobDeleted(ctx, blobName); err != nil {
			// BUG FIX: slog takes key-value pairs, not a trailing bare error
			// (which renders as !BADKEY); pre-format the full message instead.
			u.log.Error(fmt.Sprintf("post-cleaning: deleting temporary blob from s3: %s", err))
		}
	}()
	snapshotID, err := u.importSnapshot(ctx, blobName, imageName)
	if err != nil {
		return nil, fmt.Errorf("importing snapshot: %w", err)
	}
	primaryAMIID, err := u.createImageFromSnapshot(ctx, req.Version, imageName, snapshotID, req.SecureBoot, req.UEFIVarStore)
	if err != nil {
		return nil, fmt.Errorf("creating image from snapshot: %w", err)
	}
	amiIDs[u.region] = primaryAMIID
	if err := u.waitForImage(ctx, primaryAMIID, u.region); err != nil {
		return nil, fmt.Errorf("waiting for primary image to become available: %w", err)
	}

	// replicate image
	for _, region := range replicationRegions {
		amiID, err := u.replicateImage(ctx, imageName, primaryAMIID, region)
		if err != nil {
			return nil, fmt.Errorf("replicating image to region %s: %w", region, err)
		}
		amiIDs[region] = amiID
	}

	// wait for replication, tag, publish
	var imageInfo []versionsapi.ImageInfoEntry
	for _, region := range allRegions {
		if err := u.waitForImage(ctx, amiIDs[region], region); err != nil {
			return nil, fmt.Errorf("waiting for image to become available in region %s: %w", region, err)
		}
		if err := u.tagImageAndSnapshot(ctx, imageName, amiIDs[region], region); err != nil {
			return nil, fmt.Errorf("tagging image in region %s: %w", region, err)
		}
		if !publish {
			continue
		}
		if err := u.publishImage(ctx, amiIDs[region], region); err != nil {
			return nil, fmt.Errorf("publishing image in region %s: %w", region, err)
		}
		imageInfo = append(imageInfo, versionsapi.ImageInfoEntry{
			CSP:                "aws",
			AttestationVariant: req.AttestationVariant,
			Reference:          amiIDs[region],
			Region:             region,
		})
	}
	return imageInfo, nil
}
// ensureBucket makes sure the uploader's S3 bucket exists,
// creating it in the primary region if it does not.
func (u *Uploader) ensureBucket(ctx context.Context) error {
	client, err := u.s3(ctx, u.region)
	if err != nil {
		return fmt.Errorf("determining if bucket %s exists: %w", u.bucketName, err)
	}
	// A successful HEAD means the bucket already exists.
	if _, err = client.HeadBucket(ctx, &s3.HeadBucketInput{
		Bucket: &u.bucketName,
	}); err == nil {
		u.log.Debug(fmt.Sprintf("Bucket %s exists", u.bucketName))
		return nil
	}
	// Anything other than "no such bucket" is a real failure.
	var notFound *types.NoSuchBucket
	if !errors.As(err, &notFound) {
		return fmt.Errorf("determining if bucket %s exists: %w", u.bucketName, err)
	}
	u.log.Debug(fmt.Sprintf("Creating bucket %s", u.bucketName))
	if _, err := client.CreateBucket(ctx, &s3.CreateBucketInput{
		Bucket: &u.bucketName,
	}); err != nil {
		return fmt.Errorf("creating bucket %s: %w", u.bucketName, err)
	}
	return nil
}
// uploadBlob streams the raw OS image into the uploader's S3 bucket under
// blobName, requesting a SHA-256 checksum on the object.
func (u *Uploader) uploadBlob(ctx context.Context, blobName string, img io.Reader) error {
	u.log.Debug(fmt.Sprintf("Uploading os image as %s", blobName))
	uploader, err := u.s3uploader(ctx, u.region)
	if err != nil {
		return err
	}
	input := &s3.PutObjectInput{
		Bucket:            &u.bucketName,
		Key:               &blobName,
		Body:              img,
		ChecksumAlgorithm: s3types.ChecksumAlgorithmSha256,
	}
	_, err = uploader.Upload(ctx, input)
	return err
}
// ensureBlobDeleted removes the temporary image blob from S3 if it exists.
// A missing blob is not an error.
func (u *Uploader) ensureBlobDeleted(ctx context.Context, blobName string) error {
	s3C, err := u.s3(ctx, u.region)
	if err != nil {
		return err
	}
	_, err = s3C.HeadObject(ctx, &s3.HeadObjectInput{
		Bucket: &u.bucketName,
		Key:    &blobName,
	})
	var apiError smithy.APIError
	if errors.As(err, &apiError) && apiError.ErrorCode() == "NotFound" {
		// BUG FIX: slog does not interpret printf verbs; pre-format the message
		// so blob and bucket names are actually rendered.
		u.log.Debug(fmt.Sprintf("Blob %s in %s doesn't exist. Nothing to clean up.", blobName, u.bucketName))
		return nil
	}
	if err != nil {
		return err
	}
	u.log.Debug(fmt.Sprintf("Deleting blob %s", blobName))
	_, err = s3C.DeleteObject(ctx, &s3.DeleteObjectInput{
		Bucket: &u.bucketName,
		Key:    &blobName,
	})
	return err
}
// findSnapshots returns the IDs of all EBS snapshots in region whose Name tag
// equals snapshotName. Snapshots without an ID are skipped.
func (u *Uploader) findSnapshots(ctx context.Context, snapshotName, region string) ([]string, error) {
	client, err := u.ec2(ctx, region)
	if err != nil {
		return nil, fmt.Errorf("creating ec2 client: %w", err)
	}
	resp, err := client.DescribeSnapshots(ctx, &ec2.DescribeSnapshotsInput{
		Filters: []ec2types.Filter{
			{
				Name:   toPtr("tag:Name"),
				Values: []string{snapshotName},
			},
		},
	})
	if err != nil {
		return nil, fmt.Errorf("describing snapshots: %w", err)
	}
	var ids []string
	for _, snapshot := range resp.Snapshots {
		if snapshot.SnapshotId != nil {
			ids = append(ids, *snapshot.SnapshotId)
		}
	}
	return ids, nil
}
// importSnapshot imports the raw image blob from S3 as an EBS snapshot in the
// primary region and blocks until the import task finished, returning the
// resulting snapshot ID.
func (u *Uploader) importSnapshot(ctx context.Context, blobName, snapshotName string) (string, error) {
	// BUG FIX: slog does not interpret printf verbs; pre-format the message.
	u.log.Debug(fmt.Sprintf("Importing %s as snapshot %s", blobName, snapshotName))
	ec2C, err := u.ec2(ctx, u.region)
	if err != nil {
		return "", fmt.Errorf("creating ec2 client: %w", err)
	}
	importResp, err := ec2C.ImportSnapshot(ctx, &ec2.ImportSnapshotInput{
		ClientData: &ec2types.ClientData{
			Comment: &snapshotName,
		},
		Description: &snapshotName,
		DiskContainer: &ec2types.SnapshotDiskContainer{
			Description: &snapshotName,
			Format:      toPtr(string(ec2types.DiskImageFormatRaw)),
			UserBucket: &ec2types.UserBucket{
				S3Bucket: &u.bucketName,
				S3Key:    &blobName,
			},
		},
	})
	if err != nil {
		return "", fmt.Errorf("importing snapshot: %w", err)
	}
	if importResp.ImportTaskId == nil {
		return "", fmt.Errorf("importing snapshot: no import task ID returned")
	}
	u.log.Debug(fmt.Sprintf("Waiting for snapshot %s to be ready", snapshotName))
	return waitForSnapshotImport(ctx, ec2C, *importResp.ImportTaskId)
}
// ensureSnapshotDeleted deletes every snapshot in region whose Name tag equals
// snapshotName. Finding no snapshots is not an error.
func (u *Uploader) ensureSnapshotDeleted(ctx context.Context, snapshotName, region string) error {
	ec2C, err := u.ec2(ctx, region)
	if err != nil {
		return fmt.Errorf("creating ec2 client: %w", err)
	}
	snapshots, err := u.findSnapshots(ctx, snapshotName, region)
	if err != nil {
		return fmt.Errorf("finding snapshots: %w", err)
	}
	for _, snapshot := range snapshots {
		// BUG FIX: slog does not interpret printf verbs; pre-format the message.
		u.log.Debug(fmt.Sprintf("Deleting snapshot %s in %s", snapshot, region))
		if _, err := ec2C.DeleteSnapshot(ctx, &ec2.DeleteSnapshotInput{
			SnapshotId: toPtr(snapshot),
		}); err != nil {
			return fmt.Errorf("deleting snapshot %s: %w", snapshot, err)
		}
	}
	return nil
}
// createImageFromSnapshot registers a new UEFI AMI in the primary region
// backed by the given EBS snapshot and returns its AMI ID. If secure boot is
// enabled, the provided UEFI variable store is embedded into the image.
func (u *Uploader) createImageFromSnapshot(ctx context.Context, version versionsapi.Version, imageName, snapshotID string, enableSecureBoot bool, uefiVarStore secureboot.UEFIVarStore) (string, error) {
	// BUG FIX: slog does not interpret printf verbs; pre-format the message.
	u.log.Debug(fmt.Sprintf("Creating image %s in %s", imageName, u.region))
	ec2C, err := u.ec2(ctx, u.region)
	if err != nil {
		return "", fmt.Errorf("creating ec2 client: %w", err)
	}
	var uefiData *string
	if enableSecureBoot {
		awsUEFIData, err := uefiVarStore.ToAWS()
		if err != nil {
			return "", fmt.Errorf("creating uefi data: %w", err)
		}
		uefiData = toPtr(awsUEFIData)
	}
	createReq, err := ec2C.RegisterImage(ctx, &ec2.RegisterImageInput{
		Name:         &imageName,
		Architecture: ec2types.ArchitectureValuesX8664,
		BlockDeviceMappings: []ec2types.BlockDeviceMapping{
			{
				DeviceName: toPtr("/dev/xvda"),
				Ebs: &ec2types.EbsBlockDevice{
					DeleteOnTermination: toPtr(true),
					SnapshotId:          &snapshotID,
				},
			},
		},
		BootMode:           ec2types.BootModeValuesUefi,
		Description:        toPtr("Constellation " + version.ShortPath()),
		EnaSupport:         toPtr(true),
		RootDeviceName:     toPtr("/dev/xvda"),
		TpmSupport:         ec2types.TpmSupportValuesV20,
		UefiData:           uefiData,
		VirtualizationType: toPtr("hvm"),
	})
	if err != nil {
		return "", fmt.Errorf("creating image: %w", err)
	}
	if createReq.ImageId == nil {
		return "", fmt.Errorf("creating image: no image ID returned")
	}
	return *createReq.ImageId, nil
}
// replicateImage copies the primary AMI into the given region and returns the
// new AMI's ID. The copy is started asynchronously; callers wait for it via
// waitForImage.
func (u *Uploader) replicateImage(ctx context.Context, imageName, amiID string, region string) (string, error) {
	// BUG FIX: slog does not interpret printf verbs; pre-format the message.
	u.log.Debug(fmt.Sprintf("Replicating image %s to %s", imageName, region))
	ec2C, err := u.ec2(ctx, region)
	if err != nil {
		return "", fmt.Errorf("creating ec2 client: %w", err)
	}
	replicateReq, err := ec2C.CopyImage(ctx, &ec2.CopyImageInput{
		Name:          &imageName,
		SourceImageId: &amiID,
		SourceRegion:  &u.region,
	})
	if err != nil {
		return "", fmt.Errorf("replicating image: %w", err)
	}
	if replicateReq.ImageId == nil {
		return "", fmt.Errorf("replicating image: no image ID returned")
	}
	return *replicateReq.ImageId, nil
}
// findImage looks up the AMI ID of the image with the given name in region.
// It returns errAMIDoesNotExist if no image with that name is found.
func (u *Uploader) findImage(ctx context.Context, imageName, region string) (string, error) {
	client, err := u.ec2(ctx, region)
	if err != nil {
		return "", fmt.Errorf("creating ec2 client: %w", err)
	}
	// (Local renamed from "snapshots": DescribeImages returns images, not snapshots.)
	resp, err := client.DescribeImages(ctx, &ec2.DescribeImagesInput{
		Filters: []ec2types.Filter{
			{
				Name:   toPtr("name"),
				Values: []string{imageName},
			},
		},
	})
	if err != nil {
		return "", fmt.Errorf("describing images: %w", err)
	}
	switch {
	case len(resp.Images) == 0:
		return "", errAMIDoesNotExist
	case len(resp.Images) != 1:
		return "", fmt.Errorf("expected 1 image, got %d", len(resp.Images))
	case resp.Images[0].ImageId == nil:
		return "", fmt.Errorf("image ID is nil")
	}
	return *resp.Images[0].ImageId, nil
}
// waitForImage blocks until the AMI is available in region or maxWait expires.
func (u *Uploader) waitForImage(ctx context.Context, amiID, region string) error {
	// BUG FIX: slog does not interpret printf verbs; pre-format the message.
	u.log.Debug(fmt.Sprintf("Waiting for image %s in %s to be created", amiID, region))
	ec2C, err := u.ec2(ctx, region)
	if err != nil {
		return fmt.Errorf("creating ec2 client: %w", err)
	}
	waiter := ec2.NewImageAvailableWaiter(ec2C)
	if err := waiter.Wait(ctx, &ec2.DescribeImagesInput{
		ImageIds: []string{amiID},
	}, maxWait); err != nil {
		return fmt.Errorf("waiting for image: %w", err)
	}
	return nil
}
// tagImageAndSnapshot sets the Name tag on both the AMI and its backing EBS
// snapshot so they can be found (and cleaned up) by name later.
func (u *Uploader) tagImageAndSnapshot(ctx context.Context, imageName, amiID, region string) error {
	// BUG FIX: slog does not interpret printf verbs; pre-format the message.
	u.log.Debug(fmt.Sprintf("Tagging backing snapshot of image %s in %s", amiID, region))
	ec2C, err := u.ec2(ctx, region)
	if err != nil {
		return fmt.Errorf("creating ec2 client: %w", err)
	}
	snapshotID, err := getBackingSnapshotID(ctx, ec2C, amiID)
	if err != nil {
		return fmt.Errorf("getting backing snapshot ID: %w", err)
	}
	_, err = ec2C.CreateTags(ctx, &ec2.CreateTagsInput{
		Resources: []string{amiID, snapshotID},
		Tags: []ec2types.Tag{
			{
				Key:   toPtr("Name"),
				Value: toPtr(imageName),
			},
		},
	})
	if err != nil {
		return fmt.Errorf("tagging ami and snapshot: %w", err)
	}
	return nil
}
// publishImage makes the AMI with the given ID publicly launchable by adding
// the "all" launch permission group.
func (u *Uploader) publishImage(ctx context.Context, amiID, region string) error {
	// NOTE(review): the parameter was previously named imageName, but callers
	// pass an AMI ID and it is sent as ImageId — renamed for clarity
	// (positional call sites are unaffected).
	// BUG FIX: slog does not interpret printf verbs; pre-format the message.
	u.log.Debug(fmt.Sprintf("Publishing image %s in %s", amiID, region))
	ec2C, err := u.ec2(ctx, region)
	if err != nil {
		return fmt.Errorf("creating ec2 client: %w", err)
	}
	_, err = ec2C.ModifyImageAttribute(ctx, &ec2.ModifyImageAttributeInput{
		ImageId: &amiID,
		LaunchPermission: &ec2types.LaunchPermissionModifications{
			Add: []ec2types.LaunchPermission{
				{
					Group: ec2types.PermissionGroupAll,
				},
			},
		},
	})
	if err != nil {
		return fmt.Errorf("publishing image: %w", err)
	}
	return nil
}
// ensureImageDeleted deregisters the AMI with the given name in region and
// deletes its backing snapshot, if they exist. A missing image is not an
// error.
func (u *Uploader) ensureImageDeleted(ctx context.Context, imageName, region string) error {
	ec2C, err := u.ec2(ctx, region)
	if err != nil {
		return fmt.Errorf("creating ec2 client: %w", err)
	}
	amiID, err := u.findImage(ctx, imageName, region)
	if errors.Is(err, errAMIDoesNotExist) {
		// BUG FIX: slog does not interpret printf verbs; pre-format the message.
		u.log.Debug(fmt.Sprintf("Image %s in %s doesn't exist. Nothing to clean up.", imageName, region))
		return nil
	}
	if err != nil {
		// BUG FIX: lookup errors other than the not-found sentinel were
		// silently ignored and the function continued with an empty AMI ID.
		return fmt.Errorf("finding image %s: %w", imageName, err)
	}
	snapshotID, err := getBackingSnapshotID(ctx, ec2C, amiID)
	if errors.Is(err, errAMIDoesNotExist) {
		u.log.Debug(fmt.Sprintf("Image %s doesn't exist. Nothing to clean up.", amiID))
		return nil
	}
	if err != nil {
		return fmt.Errorf("getting backing snapshot ID: %w", err)
	}
	u.log.Debug(fmt.Sprintf("Deleting image %s in %s with backing snapshot", amiID, region))
	if _, err := ec2C.DeregisterImage(ctx, &ec2.DeregisterImageInput{
		ImageId: &amiID,
	}); err != nil {
		return fmt.Errorf("deleting image: %w", err)
	}
	if _, err := ec2C.DeleteSnapshot(ctx, &ec2.DeleteSnapshotInput{
		SnapshotId: &snapshotID,
	}); err != nil {
		return fmt.Errorf("deleting snapshot: %w", err)
	}
	return nil
}
// imageName derives the AMI name for the given version and attestation
// variant. Non-stable streams additionally embed a timestamp so repeated
// uploads of the same version do not collide.
func imageName(version versionsapi.Version, attestationVariant string, timestamp time.Time) string {
	if version.Stream() != "stable" {
		return fmt.Sprintf("constellation-%s-%s-%s-%s", version.Stream(), version.Version(), attestationVariant, timestamp.Format(timestampFormat))
	}
	return fmt.Sprintf("constellation-%s-%s", version.Version(), attestationVariant)
}
// waitForSnapshotImport polls the given import task until the snapshot import
// completed, returning the snapshot ID, or fails with an error. The poll loop
// honors context cancellation between iterations.
func waitForSnapshotImport(ctx context.Context, ec2C ec2API, importTaskID string) (string, error) {
	for {
		taskResp, err := ec2C.DescribeImportSnapshotTasks(ctx, &ec2.DescribeImportSnapshotTasksInput{
			ImportTaskIds: []string{importTaskID},
		})
		if err != nil {
			return "", fmt.Errorf("describing import snapshot task: %w", err)
		}
		if len(taskResp.ImportSnapshotTasks) == 0 {
			return "", fmt.Errorf("describing import snapshot task: no tasks returned")
		}
		detail := taskResp.ImportSnapshotTasks[0].SnapshotTaskDetail
		if detail == nil {
			return "", fmt.Errorf("describing import snapshot task: no snapshot task detail returned")
		}
		if detail.Status == nil {
			return "", fmt.Errorf("describing import snapshot task: no status returned")
		}
		switch *detail.Status {
		case string(ec2types.SnapshotStateCompleted):
			return *detail.SnapshotId, nil
		case string(ec2types.SnapshotStateError):
			return "", fmt.Errorf("importing snapshot: task failed")
		}
		// BUG FIX: the loop previously slept unconditionally and could not be
		// canceled; abort promptly when the context is done.
		select {
		case <-ctx.Done():
			return "", ctx.Err()
		case <-time.After(waitInterval):
		}
	}
}
// getBackingSnapshotID returns the ID of the EBS snapshot backing the given
// AMI. Both a failed lookup and an unknown AMI are reported as
// errAMIDoesNotExist (callers distinguish only existence).
func getBackingSnapshotID(ctx context.Context, ec2C ec2API, amiID string) (string, error) {
	resp, err := ec2C.DescribeImages(ctx, &ec2.DescribeImagesInput{
		ImageIds: []string{amiID},
	})
	if err != nil || len(resp.Images) == 0 {
		return "", errAMIDoesNotExist
	}
	if n := len(resp.Images); n != 1 {
		return "", fmt.Errorf("describing image: expected 1 image, got %d", n)
	}
	image := resp.Images[0]
	if n := len(image.BlockDeviceMappings); n != 1 {
		return "", fmt.Errorf("found %d block device mappings for image %s, expected 1", n, amiID)
	}
	mapping := image.BlockDeviceMappings[0]
	if mapping.Ebs == nil {
		return "", fmt.Errorf("image %s does not have an EBS block device mapping", amiID)
	}
	if mapping.Ebs.SnapshotId == nil {
		return "", fmt.Errorf("image %s does not have an EBS snapshot", amiID)
	}
	return *mapping.Ebs.SnapshotId, nil
}
// ec2API is the subset of the EC2 client interface used by the Uploader
// (narrowed presumably to allow stubbing in tests — confirm against test files).
type ec2API interface {
	DescribeImages(ctx context.Context, params *ec2.DescribeImagesInput,
		optFns ...func(*ec2.Options),
	) (*ec2.DescribeImagesOutput, error)
	ModifyImageAttribute(ctx context.Context, params *ec2.ModifyImageAttributeInput,
		optFns ...func(*ec2.Options),
	) (*ec2.ModifyImageAttributeOutput, error)
	RegisterImage(ctx context.Context, params *ec2.RegisterImageInput,
		optFns ...func(*ec2.Options),
	) (*ec2.RegisterImageOutput, error)
	CopyImage(ctx context.Context, params *ec2.CopyImageInput, optFns ...func(*ec2.Options),
	) (*ec2.CopyImageOutput, error)
	DeregisterImage(ctx context.Context, params *ec2.DeregisterImageInput,
		optFns ...func(*ec2.Options),
	) (*ec2.DeregisterImageOutput, error)
	ImportSnapshot(ctx context.Context, params *ec2.ImportSnapshotInput,
		optFns ...func(*ec2.Options),
	) (*ec2.ImportSnapshotOutput, error)
	DescribeImportSnapshotTasks(ctx context.Context, params *ec2.DescribeImportSnapshotTasksInput,
		optFns ...func(*ec2.Options),
	) (*ec2.DescribeImportSnapshotTasksOutput, error)
	DescribeSnapshots(ctx context.Context, params *ec2.DescribeSnapshotsInput,
		optFns ...func(*ec2.Options),
	) (*ec2.DescribeSnapshotsOutput, error)
	DeleteSnapshot(ctx context.Context, params *ec2.DeleteSnapshotInput, optFns ...func(*ec2.Options),
	) (*ec2.DeleteSnapshotOutput, error)
	CreateTags(ctx context.Context, params *ec2.CreateTagsInput, optFns ...func(*ec2.Options),
	) (*ec2.CreateTagsOutput, error)
}

// s3API is the subset of the S3 client interface used by the Uploader.
type s3API interface {
	HeadBucket(ctx context.Context, params *s3.HeadBucketInput, optFns ...func(*s3.Options),
	) (*s3.HeadBucketOutput, error)
	CreateBucket(ctx context.Context, params *s3.CreateBucketInput, optFns ...func(*s3.Options),
	) (*s3.CreateBucketOutput, error)
	HeadObject(ctx context.Context, params *s3.HeadObjectInput, optFns ...func(*s3.Options),
	) (*s3.HeadObjectOutput, error)
	DeleteObject(ctx context.Context, params *s3.DeleteObjectInput, optFns ...func(*s3.Options),
	) (*s3.DeleteObjectOutput, error)
}

// s3UploaderAPI is the subset of the S3 upload manager used to stream the
// raw image blob into the bucket.
type s3UploaderAPI interface {
	Upload(ctx context.Context, input *s3.PutObjectInput, opts ...func(*s3manager.Uploader),
	) (*s3manager.UploadOutput, error)
}
// toPtr returns a pointer to a copy of the given value.
func toPtr[T any](v T) *T {
	val := v
	return &val
}
const (
	// waitInterval is the polling interval used while waiting for the
	// snapshot import task (see waitForSnapshotImport).
	waitInterval = 15 * time.Second
	// maxWait bounds how long waitForImage waits for an AMI to become available.
	maxWait = 30 * time.Minute
	// timestampFormat is the time layout embedded in non-stable image names.
	timestampFormat = "20060102150405"
)

var (
	// errAMIDoesNotExist signals that no AMI with the requested name/ID exists.
	errAMIDoesNotExist = errors.New("ami does not exist")
	// replicationRegions lists the regions the primary AMI is copied to in
	// addition to the uploader's own region.
	replicationRegions = []string{"eu-west-1", "eu-west-3", "us-east-2", "ap-south-1"}
)

View file

@ -1,710 +0,0 @@
/*
Copyright (c) Edgeless Systems GmbH
SPDX-License-Identifier: AGPL-3.0-only
*/
// package azure implements uploading os images to azure.
package azure
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"log/slog"
"strings"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
armcomputev5 "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob"
"github.com/edgelesssys/constellation/v2/internal/api/versionsapi"
"github.com/edgelesssys/constellation/v2/internal/osimage"
)
// Uploader can upload and remove os images on Azure.
type Uploader struct {
	subscription     string        // Azure subscription ID
	location         string        // Azure region the resources are created in
	resourceGroup    string        // resource group holding disks/images/galleries
	pollingFrequency time.Duration // polling interval for long-running Azure operations

	// Azure API clients for the services involved in an image upload.
	disks             azureDiskAPI
	managedImages     azureManagedImageAPI
	blob              sasBlobUploader // builds a page-blob client from a SAS URL
	galleries         azureGalleriesAPI
	image             azureGalleriesImageAPI
	imageVersions     azureGalleriesImageVersionAPI
	communityVersions azureCommunityGalleryImageVersionAPI

	log *slog.Logger
}
// New creates a new Uploader.
// It authenticates with the default Azure credential chain and constructs all
// compute clients for the given subscription up front; any client that fails
// to construct aborts creation.
func New(subscription, location, resourceGroup string, log *slog.Logger) (*Uploader, error) {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		return nil, err
	}
	disks, err := armcomputev5.NewDisksClient(subscription, cred, nil)
	if err != nil {
		return nil, err
	}
	managedImages, err := armcomputev5.NewImagesClient(subscription, cred, nil)
	if err != nil {
		return nil, err
	}
	galleries, err := armcomputev5.NewGalleriesClient(subscription, cred, nil)
	if err != nil {
		return nil, err
	}
	galleryImages, err := armcomputev5.NewGalleryImagesClient(subscription, cred, nil)
	if err != nil {
		return nil, err
	}
	galleryImageVersions, err := armcomputev5.NewGalleryImageVersionsClient(subscription, cred, nil)
	if err != nil {
		return nil, err
	}
	communityImageVersions, err := armcomputev5.NewCommunityGalleryImageVersionsClient(subscription, cred, nil)
	if err != nil {
		return nil, err
	}
	return &Uploader{
		subscription:     subscription,
		location:         location,
		resourceGroup:    resourceGroup,
		pollingFrequency: pollingFrequency,
		disks:            disks,
		managedImages:    managedImages,
		blob: func(sasBlobURL string) (azurePageblobAPI, error) {
			return pageblob.NewClientWithNoCredential(sasBlobURL, nil)
		},
		galleries:         galleries,
		image:             galleryImages,
		imageVersions:     galleryImageVersions,
		communityVersions: communityImageVersions,
		log:               log,
	}, nil
}
// Upload uploads an OS image to Azure.
// It stages the image as a temporary disk, converts it into a managed image,
// and publishes it as a new version of an image definition in a shared image
// gallery (SIG). The returned entry contains the reference clusters should use.
func (u *Uploader) Upload(ctx context.Context, req *osimage.UploadRequest) ([]versionsapi.ImageInfoEntry, error) {
	formattedTime := req.Timestamp.Format(timestampFormat)
	diskName := fmt.Sprintf("constellation-%s-%s-%s", req.Version.Stream(), formattedTime, req.AttestationVariant)
	// Pick the gallery based on the release stream.
	var sigName string
	switch req.Version.Stream() {
	case "stable":
		sigName = sigNameStable
	case "debug":
		sigName = sigNameDebug
	default:
		sigName = sigNameDefault
	}
	definitionName := imageOffer(req.Version)
	versionName, err := imageVersion(req.Version, req.Timestamp)
	if err != nil {
		return nil, fmt.Errorf("determining image version name: %w", err)
	}

	// ensure new image can be uploaded by deleting existing resources using the same name
	if err := u.ensureImageVersionDeleted(ctx, sigName, definitionName, versionName); err != nil {
		return nil, fmt.Errorf("pre-cleaning: ensuring no image version using the same name exists: %w", err)
	}
	if err := u.ensureManagedImageDeleted(ctx, diskName); err != nil {
		return nil, fmt.Errorf("pre-cleaning: ensuring no managed image using the same name exists: %w", err)
	}
	if err := u.ensureDiskDeleted(ctx, diskName); err != nil {
		return nil, fmt.Errorf("pre-cleaning: ensuring no temporary disk using the same name exists: %w", err)
	}

	diskID, err := u.createDisk(ctx, diskName, DiskTypeNormal, req.Image, nil, req.Size)
	if err != nil {
		return nil, fmt.Errorf("creating disk: %w", err)
	}
	defer func() {
		// cleanup temp disk
		err := u.ensureDiskDeleted(ctx, diskName)
		if err != nil {
			// slog does not interpret printf verbs; pre-format the message.
			u.log.Error(fmt.Sprintf("post-cleaning: deleting disk image: %v", err))
		}
	}()

	managedImageID, err := u.createManagedImage(ctx, diskName, diskID)
	if err != nil {
		return nil, fmt.Errorf("creating managed image: %w", err)
	}
	if err := u.ensureSIG(ctx, sigName); err != nil {
		return nil, fmt.Errorf("ensuring sig exists: %w", err)
	}
	if err := u.ensureImageDefinition(ctx, sigName, definitionName, req.Version, req.AttestationVariant); err != nil {
		return nil, fmt.Errorf("ensuring image definition exists: %w", err)
	}

	unsharedImageVersionID, err := u.createImageVersion(ctx, sigName, definitionName, versionName, managedImageID)
	if err != nil {
		return nil, fmt.Errorf("creating image version: %w", err)
	}
	imageReference, err := u.getImageReference(ctx, sigName, definitionName, versionName, unsharedImageVersionID)
	if err != nil {
		return nil, fmt.Errorf("getting image reference: %w", err)
	}

	return []versionsapi.ImageInfoEntry{
		{
			CSP:                "azure",
			AttestationVariant: req.AttestationVariant,
			Reference:          imageReference,
		},
	}, nil
}
// createDisk creates and initializes (uploads contents of) an azure disk.
// For DiskTypeWithVMGS, vmgs must be non-nil and its contents are uploaded as
// the VM guest state blob in addition to the OS image.
// It returns the Azure resource ID of the created disk.
func (u *Uploader) createDisk(ctx context.Context, diskName string, diskType DiskType, img io.ReadSeeker, vmgs io.ReadSeeker, size int64) (string, error) {
	// slog does not interpret printf verbs; pre-format the message.
	u.log.Debug(fmt.Sprintf("Creating disk %s in %s", diskName, u.resourceGroup))
	if diskType == DiskTypeWithVMGS && vmgs == nil {
		return "", errors.New("cannot create disk with vmgs: vmgs reader is nil")
	}
	var createOption armcomputev5.DiskCreateOption
	var requestVMGSSAS bool
	switch diskType {
	case DiskTypeNormal:
		createOption = armcomputev5.DiskCreateOptionUpload
	case DiskTypeWithVMGS:
		createOption = armcomputev5.DiskCreateOptionUploadPreparedSecure
		requestVMGSSAS = true
	}
	disk := armcomputev5.Disk{
		Location: &u.location,
		Properties: &armcomputev5.DiskProperties{
			CreationData: &armcomputev5.CreationData{
				CreateOption:    &createOption,
				UploadSizeBytes: toPtr(size),
			},
			HyperVGeneration: toPtr(armcomputev5.HyperVGenerationV2),
			OSType:           toPtr(armcomputev5.OperatingSystemTypesLinux),
		},
	}
	createPoller, err := u.disks.BeginCreateOrUpdate(ctx, u.resourceGroup, diskName, disk, &armcomputev5.DisksClientBeginCreateOrUpdateOptions{})
	if err != nil {
		return "", fmt.Errorf("creating disk: %w", err)
	}
	createdDisk, err := createPoller.PollUntilDone(ctx, &runtime.PollUntilDoneOptions{Frequency: u.pollingFrequency})
	if err != nil {
		return "", fmt.Errorf("waiting for disk to be created: %w", err)
	}

	// Request a short-lived SAS token so the blob contents can be written.
	u.log.Debug("Granting temporary upload permissions via SAS token")
	accessGrant := armcomputev5.GrantAccessData{
		Access:                   toPtr(armcomputev5.AccessLevelWrite),
		DurationInSeconds:        toPtr(int32(uploadAccessDuration)),
		GetSecureVMGuestStateSAS: &requestVMGSSAS,
	}
	accessPoller, err := u.disks.BeginGrantAccess(ctx, u.resourceGroup, diskName, accessGrant, &armcomputev5.DisksClientBeginGrantAccessOptions{})
	if err != nil {
		return "", fmt.Errorf("generating disk sas token: %w", err)
	}
	accessPollerResp, err := accessPoller.PollUntilDone(ctx, &runtime.PollUntilDoneOptions{Frequency: u.pollingFrequency})
	if err != nil {
		return "", fmt.Errorf("waiting for sas token: %w", err)
	}

	if requestVMGSSAS {
		// Secure disks carry an additional VM guest state blob.
		u.log.Debug("Uploading vmgs")
		vmgsSize, err := vmgs.Seek(0, io.SeekEnd)
		if err != nil {
			return "", err
		}
		if _, err := vmgs.Seek(0, io.SeekStart); err != nil {
			return "", err
		}
		if accessPollerResp.SecurityDataAccessSAS == nil {
			return "", errors.New("uploading vmgs: grant access returned no vmgs sas")
		}
		if err := uploadBlob(ctx, *accessPollerResp.SecurityDataAccessSAS, vmgs, vmgsSize, u.blob); err != nil {
			return "", fmt.Errorf("uploading vmgs: %w", err)
		}
	}

	u.log.Debug("Uploading os image")
	if accessPollerResp.AccessSAS == nil {
		return "", errors.New("uploading disk: grant access returned no disk sas")
	}
	if err := uploadBlob(ctx, *accessPollerResp.AccessSAS, img, size, u.blob); err != nil {
		return "", fmt.Errorf("uploading image: %w", err)
	}

	// Revoke write access again; the disk contents are now final.
	revokePoller, err := u.disks.BeginRevokeAccess(ctx, u.resourceGroup, diskName, &armcomputev5.DisksClientBeginRevokeAccessOptions{})
	if err != nil {
		return "", fmt.Errorf("revoking disk sas token: %w", err)
	}
	if _, err := revokePoller.PollUntilDone(ctx, &runtime.PollUntilDoneOptions{Frequency: u.pollingFrequency}); err != nil {
		return "", fmt.Errorf("waiting for sas token revocation: %w", err)
	}
	if createdDisk.ID == nil {
		return "", errors.New("created disk has no id")
	}
	return *createdDisk.ID, nil
}
// ensureDiskDeleted deletes the named disk if it exists.
// A disk that cannot be found is treated as already deleted and is not an error.
func (u *Uploader) ensureDiskDeleted(ctx context.Context, diskName string) error {
	_, err := u.disks.Get(ctx, u.resourceGroup, diskName, &armcomputev5.DisksClientGetOptions{})
	if err != nil {
		// slog does not interpret printf verbs; pre-format the message.
		u.log.Debug(fmt.Sprintf("Disk %s in %s doesn't exist. Nothing to clean up.", diskName, u.resourceGroup))
		return nil
	}
	u.log.Debug(fmt.Sprintf("Deleting disk %s in %s", diskName, u.resourceGroup))
	deletePoller, err := u.disks.BeginDelete(ctx, u.resourceGroup, diskName, &armcomputev5.DisksClientBeginDeleteOptions{})
	if err != nil {
		return fmt.Errorf("deleting disk: %w", err)
	}
	if _, err = deletePoller.PollUntilDone(ctx, &runtime.PollUntilDoneOptions{Frequency: u.pollingFrequency}); err != nil {
		return fmt.Errorf("waiting for disk to be deleted: %w", err)
	}
	return nil
}
// createManagedImage wraps an uploaded disk in an Azure managed image
// (generalized Linux, Hyper-V gen 2) and returns its resource ID.
func (u *Uploader) createManagedImage(ctx context.Context, imageName string, diskID string) (string, error) {
	// slog does not interpret printf verbs; pre-format the message.
	u.log.Debug(fmt.Sprintf("Creating managed image %s in %s", imageName, u.resourceGroup))
	image := armcomputev5.Image{
		Location: &u.location,
		Properties: &armcomputev5.ImageProperties{
			HyperVGeneration: toPtr(armcomputev5.HyperVGenerationTypesV2),
			StorageProfile: &armcomputev5.ImageStorageProfile{
				OSDisk: &armcomputev5.ImageOSDisk{
					OSState: toPtr(armcomputev5.OperatingSystemStateTypesGeneralized),
					OSType:  toPtr(armcomputev5.OperatingSystemTypesLinux),
					ManagedDisk: &armcomputev5.SubResource{
						ID: &diskID,
					},
				},
			},
		},
	}
	createPoller, err := u.managedImages.BeginCreateOrUpdate(
		ctx, u.resourceGroup, imageName, image,
		&armcomputev5.ImagesClientBeginCreateOrUpdateOptions{},
	)
	if err != nil {
		return "", fmt.Errorf("creating managed image: %w", err)
	}
	createdImage, err := createPoller.PollUntilDone(ctx, &runtime.PollUntilDoneOptions{Frequency: u.pollingFrequency})
	if err != nil {
		return "", fmt.Errorf("waiting for image to be created: %w", err)
	}
	if createdImage.ID == nil {
		return "", errors.New("created image has no id")
	}
	return *createdImage.ID, nil
}
// ensureManagedImageDeleted deletes the named managed image if it exists.
// A missing image is treated as already deleted and is not an error.
func (u *Uploader) ensureManagedImageDeleted(ctx context.Context, imageName string) error {
	_, err := u.managedImages.Get(ctx, u.resourceGroup, imageName, &armcomputev5.ImagesClientGetOptions{})
	if err != nil {
		// slog does not interpret printf verbs; pre-format the message.
		u.log.Debug(fmt.Sprintf("Managed image %s in %s doesn't exist. Nothing to clean up.", imageName, u.resourceGroup))
		return nil
	}
	u.log.Debug(fmt.Sprintf("Deleting managed image %s in %s", imageName, u.resourceGroup))
	deletePoller, err := u.managedImages.BeginDelete(ctx, u.resourceGroup, imageName, &armcomputev5.ImagesClientBeginDeleteOptions{})
	if err != nil {
		return fmt.Errorf("deleting image: %w", err)
	}
	if _, err = deletePoller.PollUntilDone(ctx, &runtime.PollUntilDoneOptions{Frequency: u.pollingFrequency}); err != nil {
		return fmt.Errorf("waiting for image to be deleted: %w", err)
	}
	return nil
}
// ensureSIG creates a SIG if it does not exist yet.
func (u *Uploader) ensureSIG(ctx context.Context, sigName string) error {
	_, err := u.galleries.Get(ctx, u.resourceGroup, sigName, &armcomputev5.GalleriesClientGetOptions{})
	if err == nil {
		// slog does not interpret printf verbs; pre-format the message.
		u.log.Debug(fmt.Sprintf("Image gallery %s in %s exists", sigName, u.resourceGroup))
		return nil
	}
	u.log.Debug(fmt.Sprintf("Creating image gallery %s in %s", sigName, u.resourceGroup))
	gallery := armcomputev5.Gallery{
		Location: &u.location,
	}
	createPoller, err := u.galleries.BeginCreateOrUpdate(ctx, u.resourceGroup, sigName, gallery,
		&armcomputev5.GalleriesClientBeginCreateOrUpdateOptions{},
	)
	if err != nil {
		return fmt.Errorf("creating image gallery: %w", err)
	}
	if _, err = createPoller.PollUntilDone(ctx, &runtime.PollUntilDoneOptions{Frequency: u.pollingFrequency}); err != nil {
		return fmt.Errorf("waiting for image gallery to be created: %w", err)
	}
	return nil
}
// ensureImageDefinition creates an image definition (component of a SIG) if it does not exist yet.
// The definition's security type is derived from the attestation variant:
// SEV-SNP images are marked ConfidentialVMSupported, trusted-launch images
// get the TrustedLaunch security type.
func (u *Uploader) ensureImageDefinition(ctx context.Context, sigName, definitionName string, version versionsapi.Version, attestationVariant string) error {
	_, err := u.image.Get(ctx, u.resourceGroup, sigName, definitionName, &armcomputev5.GalleryImagesClientGetOptions{})
	if err == nil {
		// Get succeeding means the definition already exists; nothing to do.
		u.log.Debug(fmt.Sprintf("Image definition %s/%s in %s exists", sigName, definitionName, u.resourceGroup))
		return nil
	}
	u.log.Debug(fmt.Sprintf("Creating image definition %s/%s in %s", sigName, definitionName, u.resourceGroup))
	var securityType string
	// TODO(malt3): This needs to allow the *Supported or the normal variant
	// based on whether a VMGS was provided or not.
	// VMGS provided: ConfidentialVM
	// No VMGS provided: ConfidentialVMSupported
	switch strings.ToLower(attestationVariant) {
	case "azure-sev-snp":
		securityType = string("ConfidentialVMSupported")
	case "azure-trustedlaunch":
		securityType = string(armcomputev5.SecurityTypesTrustedLaunch)
	}
	offer := imageOffer(version)

	galleryImage := armcomputev5.GalleryImage{
		Location: &u.location,
		Properties: &armcomputev5.GalleryImageProperties{
			Identifier: &armcomputev5.GalleryImageIdentifier{
				Offer:     &offer,
				Publisher: toPtr(imageDefinitionPublisher),
				SKU:       toPtr(imageDefinitionSKU),
			},
			OSState:      toPtr(armcomputev5.OperatingSystemStateTypesGeneralized),
			OSType:       toPtr(armcomputev5.OperatingSystemTypesLinux),
			Architecture: toPtr(armcomputev5.ArchitectureX64),
			Features: []*armcomputev5.GalleryImageFeature{
				{
					Name:  toPtr("SecurityType"),
					Value: &securityType,
				},
			},
			HyperVGeneration: toPtr(armcomputev5.HyperVGenerationV2),
		},
	}
	createPoller, err := u.image.BeginCreateOrUpdate(ctx, u.resourceGroup, sigName, definitionName, galleryImage,
		&armcomputev5.GalleryImagesClientBeginCreateOrUpdateOptions{},
	)
	if err != nil {
		return fmt.Errorf("creating image definition: %w", err)
	}
	if _, err = createPoller.PollUntilDone(ctx, &runtime.PollUntilDoneOptions{Frequency: u.pollingFrequency}); err != nil {
		return fmt.Errorf("waiting for image definition to be created: %w", err)
	}
	return nil
}
// createImageVersion publishes a managed image as a new gallery image version,
// replicating it fully to all target regions. It returns the version's
// (unshared) resource ID.
func (u *Uploader) createImageVersion(ctx context.Context, sigName, definitionName, versionName, imageID string) (string, error) {
	// slog does not interpret printf verbs; pre-format the message.
	u.log.Debug(fmt.Sprintf("Creating image version %s/%s/%s in %s", sigName, definitionName, versionName, u.resourceGroup))
	imageVersion := armcomputev5.GalleryImageVersion{
		Location: &u.location,
		Properties: &armcomputev5.GalleryImageVersionProperties{
			StorageProfile: &armcomputev5.GalleryImageVersionStorageProfile{
				OSDiskImage: &armcomputev5.GalleryOSDiskImage{
					HostCaching: toPtr(armcomputev5.HostCachingReadOnly),
				},
				Source: &armcomputev5.GalleryArtifactVersionFullSource{
					ID: &imageID,
				},
			},
			PublishingProfile: &armcomputev5.GalleryImageVersionPublishingProfile{
				ReplicaCount:    toPtr[int32](1),
				ReplicationMode: toPtr(armcomputev5.ReplicationModeFull),
				TargetRegions:   targetRegions,
			},
		},
	}
	createPoller, err := u.imageVersions.BeginCreateOrUpdate(ctx, u.resourceGroup, sigName, definitionName, versionName, imageVersion,
		&armcomputev5.GalleryImageVersionsClientBeginCreateOrUpdateOptions{},
	)
	if err != nil {
		return "", fmt.Errorf("creating image version: %w", err)
	}
	createdImage, err := createPoller.PollUntilDone(ctx, &runtime.PollUntilDoneOptions{Frequency: u.pollingFrequency})
	if err != nil {
		return "", fmt.Errorf("waiting for image version to be created: %w", err)
	}
	if createdImage.ID == nil {
		return "", errors.New("created image has no id")
	}
	return *createdImage.ID, nil
}
// ensureImageVersionDeleted deletes the given gallery image version if it exists.
// A missing version is treated as already deleted and is not an error.
func (u *Uploader) ensureImageVersionDeleted(ctx context.Context, sigName, definitionName, versionName string) error {
	_, err := u.imageVersions.Get(ctx, u.resourceGroup, sigName, definitionName, versionName, &armcomputev5.GalleryImageVersionsClientGetOptions{})
	if err != nil {
		// slog does not interpret printf verbs; pre-format the message.
		u.log.Debug(fmt.Sprintf("Image version %s in %s/%s/%s doesn't exist. Nothing to clean up.", versionName, u.resourceGroup, sigName, definitionName))
		return nil
	}
	u.log.Debug(fmt.Sprintf("Deleting image version %s in %s/%s/%s", versionName, u.resourceGroup, sigName, definitionName))
	deletePoller, err := u.imageVersions.BeginDelete(ctx, u.resourceGroup, sigName, definitionName, versionName, &armcomputev5.GalleryImageVersionsClientBeginDeleteOptions{})
	if err != nil {
		return fmt.Errorf("deleting image version: %w", err)
	}
	if _, err = deletePoller.PollUntilDone(ctx, &runtime.PollUntilDoneOptions{Frequency: u.pollingFrequency}); err != nil {
		return fmt.Errorf("waiting for image version to be deleted: %w", err)
	}
	return nil
}
// getImageReference returns the image reference to use for the image version.
// If the shared image gallery is a community gallery, the community identifier is returned.
// Otherwise, the unshared identifier is returned.
func (u *Uploader) getImageReference(ctx context.Context, sigName, definitionName, versionName, unsharedID string) (string, error) {
	galleryResp, err := u.galleries.Get(ctx, u.resourceGroup, sigName, &armcomputev5.GalleriesClientGetOptions{})
	if err != nil {
		return "", fmt.Errorf("getting image gallery %s: %w", sigName, err)
	}
	// Not community-shared: fall back to the private (unshared) ID.
	if galleryResp.Properties == nil ||
		galleryResp.Properties.SharingProfile == nil ||
		galleryResp.Properties.SharingProfile.CommunityGalleryInfo == nil ||
		galleryResp.Properties.SharingProfile.CommunityGalleryInfo.CommunityGalleryEnabled == nil ||
		!*galleryResp.Properties.SharingProfile.CommunityGalleryInfo.CommunityGalleryEnabled {
		// slog does not interpret printf verbs; pre-format the message.
		u.log.Warn(fmt.Sprintf("Image gallery %s in %s is not shared. Using private identifier", sigName, u.resourceGroup))
		return unsharedID, nil
	}
	if galleryResp.Properties == nil ||
		galleryResp.Properties.SharingProfile == nil ||
		galleryResp.Properties.SharingProfile.CommunityGalleryInfo == nil ||
		galleryResp.Properties.SharingProfile.CommunityGalleryInfo.PublicNames == nil ||
		len(galleryResp.Properties.SharingProfile.CommunityGalleryInfo.PublicNames) < 1 ||
		galleryResp.Properties.SharingProfile.CommunityGalleryInfo.PublicNames[0] == nil {
		return "", fmt.Errorf("image gallery %s in %s is a community gallery but has no public names", sigName, u.resourceGroup)
	}
	communityGalleryName := *galleryResp.Properties.SharingProfile.CommunityGalleryInfo.PublicNames[0]
	u.log.Debug(fmt.Sprintf("Image gallery %s in %s is shared. Using community identifier in %s", sigName, u.resourceGroup, communityGalleryName))
	communityVersionResp, err := u.communityVersions.Get(ctx, u.location, communityGalleryName,
		definitionName, versionName,
		&armcomputev5.CommunityGalleryImageVersionsClientGetOptions{},
	)
	if err != nil {
		return "", fmt.Errorf("getting community image version %s/%s/%s: %w", communityGalleryName, definitionName, versionName, err)
	}
	if communityVersionResp.Identifier == nil || communityVersionResp.Identifier.UniqueID == nil {
		return "", fmt.Errorf("community image version %s/%s/%s has no id", communityGalleryName, definitionName, versionName)
	}
	return *communityVersionResp.Identifier.UniqueID, nil
}
// uploadBlob copies size bytes from disk into the page blob behind the given
// SAS URL, writing at most pageSizeMax bytes per request.
func uploadBlob(ctx context.Context, sasURL string, disk io.ReadSeeker, size int64, uploader sasBlobUploader) error {
	uploadClient, err := uploader(sasURL)
	if err != nil {
		return fmt.Errorf("uploading blob: %w", err)
	}
	var offset int64
	var chunksize int
	chunk := make([]byte, pageSizeMax)
	var readErr error
	for offset < size {
		chunksize, readErr = io.ReadAtLeast(disk, chunk, 1)
		if readErr != nil {
			// Wrap the read error, not err: err is the (nil) client creation
			// error at this point, which previously produced a useless message.
			return fmt.Errorf("reading from disk: %w", readErr)
		}
		if err := uploadChunk(ctx, uploadClient, bytes.NewReader(chunk[:chunksize]), offset, int64(chunksize)); err != nil {
			return fmt.Errorf("uploading chunk: %w", err)
		}
		offset += int64(chunksize)
	}
	return nil
}
// uploadChunk writes a single page range of the image to the page blob,
// starting at offset and spanning chunksize bytes.
func uploadChunk(ctx context.Context, uploader azurePageblobAPI, chunk io.ReadSeeker, offset, chunksize int64) error {
	_, err := uploader.UploadPages(ctx, &readSeekNopCloser{chunk}, blob.HTTPRange{
		Offset: offset,
		Count:  chunksize,
	}, nil)
	return err
}
// imageOffer returns the gallery image offer name to use for the given version.
// Stable releases share the fixed "constellation" offer, tagged debug builds
// use their semantic version, and everything else is keyed by ref and stream.
func imageOffer(version versionsapi.Version) string {
	stream := version.Stream()
	if stream == "stable" {
		return "constellation"
	}
	if stream == "debug" && version.Ref() == "-" {
		return version.Version()
	}
	return version.Ref() + "-" + version.Stream()
}
// imageVersion determines the semantic version string used inside a sig image.
// For releases, the actual semantic version of the image (without leading v) is used (major.minor.patch).
// Otherwise, the version is derived from the commit timestamp.
func imageVersion(version versionsapi.Version, timestamp time.Time) (string, error) {
	isRelease := version.Stream() == "stable" ||
		(version.Stream() == "debug" && version.Ref() == "-")
	if isRelease {
		return strings.TrimLeft(version.Version(), "v"), nil
	}
	formattedTime := timestamp.Format(timestampFormat)
	if len(formattedTime) != len(timestampFormat) {
		return "", errors.New("invalid timestamp")
	}
	// <year>.<month><day>.<time>
	return formattedTime[:4] + "." + formattedTime[4:8] + "." + formattedTime[8:], nil
}
// sasBlobUploader constructs a page blob client from a SAS-authenticated blob URL.
type sasBlobUploader func(sasBlobURL string) (azurePageblobAPI, error)

// azureDiskAPI is the subset of the Azure disks client used by the Uploader.
type azureDiskAPI interface {
	Get(ctx context.Context, resourceGroupName string, diskName string,
		options *armcomputev5.DisksClientGetOptions,
	) (armcomputev5.DisksClientGetResponse, error)
	BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, diskName string, disk armcomputev5.Disk,
		options *armcomputev5.DisksClientBeginCreateOrUpdateOptions,
	) (*runtime.Poller[armcomputev5.DisksClientCreateOrUpdateResponse], error)
	BeginDelete(ctx context.Context, resourceGroupName string, diskName string,
		options *armcomputev5.DisksClientBeginDeleteOptions,
	) (*runtime.Poller[armcomputev5.DisksClientDeleteResponse], error)
	BeginGrantAccess(ctx context.Context, resourceGroupName string, diskName string, grantAccessData armcomputev5.GrantAccessData,
		options *armcomputev5.DisksClientBeginGrantAccessOptions,
	) (*runtime.Poller[armcomputev5.DisksClientGrantAccessResponse], error)
	BeginRevokeAccess(ctx context.Context, resourceGroupName string, diskName string,
		options *armcomputev5.DisksClientBeginRevokeAccessOptions,
	) (*runtime.Poller[armcomputev5.DisksClientRevokeAccessResponse], error)
}

// azureManagedImageAPI is the subset of the Azure images client used by the Uploader.
type azureManagedImageAPI interface {
	Get(ctx context.Context, resourceGroupName string, imageName string,
		options *armcomputev5.ImagesClientGetOptions,
	) (armcomputev5.ImagesClientGetResponse, error)
	BeginCreateOrUpdate(ctx context.Context, resourceGroupName string,
		imageName string, parameters armcomputev5.Image,
		options *armcomputev5.ImagesClientBeginCreateOrUpdateOptions,
	) (*runtime.Poller[armcomputev5.ImagesClientCreateOrUpdateResponse], error)
	BeginDelete(ctx context.Context, resourceGroupName string, imageName string,
		options *armcomputev5.ImagesClientBeginDeleteOptions,
	) (*runtime.Poller[armcomputev5.ImagesClientDeleteResponse], error)
}

// azurePageblobAPI is the subset of the Azure page blob client used for uploads.
type azurePageblobAPI interface {
	UploadPages(ctx context.Context, body io.ReadSeekCloser, contentRange blob.HTTPRange,
		options *pageblob.UploadPagesOptions,
	) (pageblob.UploadPagesResponse, error)
}

// azureGalleriesAPI is the subset of the Azure galleries client used by the Uploader.
type azureGalleriesAPI interface {
	Get(ctx context.Context, resourceGroupName string, galleryName string,
		options *armcomputev5.GalleriesClientGetOptions,
	) (armcomputev5.GalleriesClientGetResponse, error)
	NewListPager(options *armcomputev5.GalleriesClientListOptions,
	) *runtime.Pager[armcomputev5.GalleriesClientListResponse]
	BeginCreateOrUpdate(ctx context.Context, resourceGroupName string,
		galleryName string, gallery armcomputev5.Gallery,
		options *armcomputev5.GalleriesClientBeginCreateOrUpdateOptions,
	) (*runtime.Poller[armcomputev5.GalleriesClientCreateOrUpdateResponse], error)
}

// azureGalleriesImageAPI is the subset of the gallery images client used by the Uploader.
type azureGalleriesImageAPI interface {
	Get(ctx context.Context, resourceGroupName string, galleryName string,
		galleryImageName string, options *armcomputev5.GalleryImagesClientGetOptions,
	) (armcomputev5.GalleryImagesClientGetResponse, error)
	BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, galleryName string,
		galleryImageName string, galleryImage armcomputev5.GalleryImage,
		options *armcomputev5.GalleryImagesClientBeginCreateOrUpdateOptions,
	) (*runtime.Poller[armcomputev5.GalleryImagesClientCreateOrUpdateResponse], error)
	BeginDelete(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string,
		options *armcomputev5.GalleryImagesClientBeginDeleteOptions,
	) (*runtime.Poller[armcomputev5.GalleryImagesClientDeleteResponse], error)
}

// azureGalleriesImageVersionAPI is the subset of the gallery image versions client used by the Uploader.
type azureGalleriesImageVersionAPI interface {
	Get(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string,
		options *armcomputev5.GalleryImageVersionsClientGetOptions,
	) (armcomputev5.GalleryImageVersionsClientGetResponse, error)
	NewListByGalleryImagePager(resourceGroupName string, galleryName string, galleryImageName string,
		options *armcomputev5.GalleryImageVersionsClientListByGalleryImageOptions,
	) *runtime.Pager[armcomputev5.GalleryImageVersionsClientListByGalleryImageResponse]
	BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string,
		galleryImageVersionName string, galleryImageVersion armcomputev5.GalleryImageVersion,
		options *armcomputev5.GalleryImageVersionsClientBeginCreateOrUpdateOptions,
	) (*runtime.Poller[armcomputev5.GalleryImageVersionsClientCreateOrUpdateResponse], error)
	BeginDelete(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string,
		galleryImageVersionName string, options *armcomputev5.GalleryImageVersionsClientBeginDeleteOptions,
	) (*runtime.Poller[armcomputev5.GalleryImageVersionsClientDeleteResponse], error)
}

// azureCommunityGalleryImageVersionAPI is the subset of the community gallery
// image versions client used to resolve community image references.
type azureCommunityGalleryImageVersionAPI interface {
	Get(ctx context.Context, location string,
		publicGalleryName, galleryImageName, galleryImageVersionName string,
		options *armcomputev5.CommunityGalleryImageVersionsClientGetOptions,
	) (armcomputev5.CommunityGalleryImageVersionsClientGetResponse, error)
}
const (
	// pollingFrequency is the interval at which long-running Azure operations are polled.
	pollingFrequency = 10 * time.Second
	// uploadAccessDuration is the time in seconds that
	// sas tokens should be valid for (24 hours).
	uploadAccessDuration = 86400 // 24 hours
	// pageSizeMax is the largest chunk written per page blob request.
	pageSizeMax = 4194304 // 4MiB
	// pageSizeMin is the smallest valid page blob page size.
	pageSizeMin = 512 // 512 bytes
	// sigName* are the shared image gallery names, chosen per release stream.
	sigNameStable  = "Constellation_CVM"
	sigNameDebug   = "Constellation_Debug_CVM"
	sigNameDefault = "Constellation_Testing_CVM"
	// imageDefinitionPublisher and imageDefinitionSKU identify Constellation
	// image definitions inside a gallery.
	imageDefinitionPublisher = "edgelesssys"
	imageDefinitionSKU       = "constellation"
	// timestampFormat is the layout used to derive disk names and image
	// versions from the upload timestamp.
	timestampFormat = "20060102150405"
)
// targetRegions lists the Azure regions image versions are replicated to,
// with one replica each.
var targetRegions = []*armcomputev5.TargetRegion{
	{
		Name:                 toPtr("northeurope"),
		RegionalReplicaCount: toPtr[int32](1),
	},
	{
		Name:                 toPtr("eastus"),
		RegionalReplicaCount: toPtr[int32](1),
	},
	{
		Name:                 toPtr("westeurope"),
		RegionalReplicaCount: toPtr[int32](1),
	},
	{
		Name:                 toPtr("westus"),
		RegionalReplicaCount: toPtr[int32](1),
	},
	{
		Name:                 toPtr("southeastasia"),
		RegionalReplicaCount: toPtr[int32](1),
	},
}
//go:generate stringer -type=DiskType -trimprefix=DiskType

// DiskType is the kind of disk created using the Azure API.
type DiskType uint32
// FromString converts a string into a DiskType.
// Matching is case-insensitive; unrecognized values map to DiskTypeUnknown.
func FromString(s string) DiskType {
	lowered := strings.ToLower(s)
	for _, t := range []DiskType{DiskTypeNormal, DiskTypeWithVMGS} {
		if lowered == strings.ToLower(t.String()) {
			return t
		}
	}
	return DiskTypeUnknown
}
// Known disk kinds. The zero value is DiskTypeUnknown.
const (
	// DiskTypeUnknown is default value for DiskType.
	DiskTypeUnknown DiskType = iota
	// DiskTypeNormal creates a normal Azure disk (single block device).
	DiskTypeNormal
	// DiskTypeWithVMGS creates a disk with VMGS (also called secure disk)
	// that has an additional block device for the VMGS disk.
	DiskTypeWithVMGS
)
// toPtr returns a pointer to a fresh copy of the given value.
func toPtr[T any](v T) *T {
	out := v
	return &out
}
// readSeekNopCloser adds a no-op Close method to an io.ReadSeeker so it can be
// used where an io.ReadSeekCloser is required (e.g. page blob uploads).
type readSeekNopCloser struct {
	io.ReadSeeker
}

// Close implements io.Closer. It does nothing.
func (n *readSeekNopCloser) Close() error {
	return nil
}

View file

@ -1,298 +0,0 @@
/*
Copyright (c) Edgeless Systems GmbH
SPDX-License-Identifier: AGPL-3.0-only
*/
// package gcp implements uploading os images to gcp.
package gcp
import (
"context"
"encoding/base64"
"fmt"
"io"
"log/slog"
"net/url"
"path"
"strings"
compute "cloud.google.com/go/compute/apiv1"
"cloud.google.com/go/compute/apiv1/computepb"
"cloud.google.com/go/storage"
"github.com/edgelesssys/constellation/v2/internal/api/versionsapi"
"github.com/edgelesssys/constellation/v2/internal/osimage"
"github.com/edgelesssys/constellation/v2/internal/osimage/secureboot"
gaxv2 "github.com/googleapis/gax-go/v2"
)
// Uploader can upload and remove os images on GCP.
type Uploader struct {
	project    string // GCP project the images are created in
	location   string // location of the staging bucket
	bucketName string // bucket used to stage raw image tarballs

	// API clients, abstracted as interfaces for testing.
	image  imagesAPI
	bucket bucketAPI

	log *slog.Logger
}
// New creates a new Uploader.
// It constructs the compute images client and a handle to the staging bucket
// using application default credentials.
func New(ctx context.Context, project, location, bucketName string, log *slog.Logger) (*Uploader, error) {
	image, err := compute.NewImagesRESTClient(ctx)
	if err != nil {
		return nil, err
	}
	// Named storageClient to avoid shadowing the imported storage package.
	storageClient, err := storage.NewClient(ctx)
	if err != nil {
		return nil, err
	}
	bucket := storageClient.Bucket(bucketName)

	return &Uploader{
		project:    project,
		location:   location,
		bucketName: bucketName,
		image:      image,
		bucket:     bucket,
		log:        log,
	}, nil
}
// Upload uploads an OS image to GCP.
// The raw image is staged as a tarball blob in the upload bucket, imported as
// a compute image, and the temporary blob is removed afterwards.
func (u *Uploader) Upload(ctx context.Context, req *osimage.UploadRequest) ([]versionsapi.ImageInfoEntry, error) {
	imageName := u.imageName(req.Version, req.AttestationVariant)
	blobName := imageName + ".tar.gz"
	if err := u.ensureBucket(ctx); err != nil {
		return nil, fmt.Errorf("setup: ensuring bucket exists: %w", err)
	}

	// ensure new image can be uploaded by deleting existing resources using the same name
	if err := u.ensureImageDeleted(ctx, imageName); err != nil {
		return nil, fmt.Errorf("pre-cleaning: ensuring no image using the same name exists: %w", err)
	}
	if err := u.ensureBlobDeleted(ctx, blobName); err != nil {
		return nil, fmt.Errorf("pre-cleaning: ensuring no blob using the same name exists: %w", err)
	}

	if err := u.uploadBlob(ctx, blobName, req.Image); err != nil {
		return nil, fmt.Errorf("uploading blob: %w", err)
	}
	defer func() {
		// cleanup temporary blob
		if err := u.ensureBlobDeleted(ctx, blobName); err != nil {
			// slog does not interpret printf verbs; pre-format the message.
			u.log.Error(fmt.Sprintf("post-cleaning: deleting blob: %v", err))
		}
	}()

	imageRef, err := u.createImage(ctx, req.Version, imageName, blobName, req.SecureBoot, req.SBDatabase)
	if err != nil {
		return nil, fmt.Errorf("creating image: %w", err)
	}
	return []versionsapi.ImageInfoEntry{
		{
			CSP:                "gcp",
			AttestationVariant: req.AttestationVariant,
			Reference:          imageRef,
		},
	}, nil
}
// ensureBucket creates the staging bucket if it does not exist yet.
// Any error other than "bucket not found" is returned as-is.
func (u *Uploader) ensureBucket(ctx context.Context) error {
	_, err := u.bucket.Attrs(ctx)
	if err == nil {
		u.log.Debug(fmt.Sprintf("Bucket %s exists", u.bucketName))
		return nil
	}
	if err != storage.ErrBucketNotExist {
		return err
	}
	u.log.Debug(fmt.Sprintf("Creating bucket %s", u.bucketName))
	// Public access is blocked; the bucket only stages raw image tarballs.
	return u.bucket.Create(ctx, u.project, &storage.BucketAttrs{
		PublicAccessPrevention: storage.PublicAccessPreventionEnforced,
		Location:               u.location,
	})
}
// uploadBlob streams the OS image into the staging bucket under blobName.
func (u *Uploader) uploadBlob(ctx context.Context, blobName string, img io.Reader) error {
	u.log.Debug(fmt.Sprintf("Uploading os image as %s", blobName))
	writer := u.bucket.Object(blobName).NewWriter(ctx)
	if _, err := io.Copy(writer, img); err != nil {
		return err
	}
	// Close commits the upload; its error must be checked.
	return writer.Close()
}
// ensureBlobDeleted removes the named blob from the staging bucket if it
// exists. A missing blob is treated as already deleted and is not an error.
func (u *Uploader) ensureBlobDeleted(ctx context.Context, blobName string) error {
	_, err := u.bucket.Object(blobName).Attrs(ctx)
	if err == storage.ErrObjectNotExist {
		// slog does not interpret printf verbs; pre-format the message.
		u.log.Debug(fmt.Sprintf("Blob %s in %s doesn't exist. Nothing to clean up.", blobName, u.bucketName))
		return nil
	}
	if err != nil {
		return err
	}
	u.log.Debug(fmt.Sprintf("Deleting blob %s", blobName))
	return u.bucket.Object(blobName).Delete(ctx)
}
// createImage imports the staged tarball blob as a GCP compute image, makes it
// usable by all authenticated users, and returns the image's self-link
// (relative to the compute/v1 API root).
// If enableSecureBoot is set, the secure boot keys from sbDatabase are
// attached as the image's shielded instance initial state.
func (u *Uploader) createImage(ctx context.Context, version versionsapi.Version, imageName, blobName string, enableSecureBoot bool, sbDatabase secureboot.Database) (string, error) {
	u.log.Debug(fmt.Sprintf("Creating image %s", imageName))
	blobURL := u.blobURL(blobName)
	family := u.imageFamily(version)
	var initialState *computepb.InitialStateConfig
	if enableSecureBoot {
		initialState = &computepb.InitialStateConfig{
			Pk:   pk(&sbDatabase),
			Keks: keks(&sbDatabase),
			Dbs:  dbs(&sbDatabase),
		}
	}
	req := computepb.InsertImageRequest{
		ImageResource: &computepb.Image{
			Name: &imageName,
			RawDisk: &computepb.RawDisk{
				ContainerType: toPtr("TAR"),
				Source:        &blobURL,
			},
			Family:       &family,
			Architecture: toPtr("X86_64"),
			// Features required for Constellation images (NIC, SEV/SEV-SNP, UEFI).
			GuestOsFeatures: []*computepb.GuestOsFeature{
				{Type: toPtr("GVNIC")},
				{Type: toPtr("SEV_CAPABLE")},
				{Type: toPtr("SEV_SNP_CAPABLE")},
				{Type: toPtr("VIRTIO_SCSI_MULTIQUEUE")},
				{Type: toPtr("UEFI_COMPATIBLE")},
			},
			ShieldedInstanceInitialState: initialState,
		},
		Project: u.project,
	}
	op, err := u.image.Insert(ctx, &req)
	if err != nil {
		return "", fmt.Errorf("creating image: %w", err)
	}
	if err := op.Wait(ctx); err != nil {
		return "", fmt.Errorf("waiting for image to be created: %w", err)
	}
	// Grant imageUser to all authenticated users so the image is publicly usable.
	policy := &computepb.Policy{
		Bindings: []*computepb.Binding{
			{
				Role:    toPtr("roles/compute.imageUser"),
				Members: []string{"allAuthenticatedUsers"},
			},
		},
	}
	if _, err = u.image.SetIamPolicy(ctx, &computepb.SetIamPolicyImageRequest{
		Resource: imageName,
		Project:  u.project,
		GlobalSetPolicyRequestResource: &computepb.GlobalSetPolicyRequest{
			Policy: policy,
		},
	}); err != nil {
		return "", fmt.Errorf("setting iam policy: %w", err)
	}
	// Re-read the image to obtain its canonical self-link.
	image, err := u.image.Get(ctx, &computepb.GetImageRequest{
		Image:   imageName,
		Project: u.project,
	})
	if err != nil {
		return "", fmt.Errorf("created image doesn't exist: %w", err)
	}
	return strings.TrimPrefix(image.GetSelfLink(), "https://www.googleapis.com/compute/v1/"), nil
}
// ensureImageDeleted deletes the named compute image if it exists.
// A missing image (Get failing) is treated as already deleted and is not an error.
func (u *Uploader) ensureImageDeleted(ctx context.Context, imageName string) error {
	_, err := u.image.Get(ctx, &computepb.GetImageRequest{
		Image:   imageName,
		Project: u.project,
	})
	if err != nil {
		u.log.Debug(fmt.Sprintf("Image %s doesn't exist. Nothing to clean up.", imageName))
		return nil
	}
	u.log.Debug(fmt.Sprintf("Deleting image %s", imageName))
	op, err := u.image.Delete(ctx, &computepb.DeleteImageRequest{
		Image:   imageName,
		Project: u.project,
	})
	if err != nil {
		return err
	}
	return op.Wait(ctx)
}
// blobURL builds the public https URL of a blob in the uploader's bucket.
func (u *Uploader) blobURL(blobName string) string {
	blobURL := url.URL{
		Scheme: "https",
		Host:   "storage.googleapis.com",
		Path:   path.Join(u.bucketName, blobName),
	}
	return blobURL.String()
}
// imageName derives the GCP image name from a version and attestation variant.
// Dots are not allowed in GCP image names, so they are replaced by dashes.
func (u *Uploader) imageName(version versionsapi.Version, attestationVariant string) string {
	semver := strings.ReplaceAll(version.Version(), ".", "-")
	return semver + "-" + attestationVariant + "-" + version.Stream()
}
// imageFamily returns the image family for a version.
// Stable images share the "constellation" family; other streams are grouped
// by their ref, truncated to GCP's length limit.
func (u *Uploader) imageFamily(version versionsapi.Version) string {
	if version.Stream() == "stable" {
		return "constellation"
	}
	ref := version.Ref()
	if len(ref) > 45 {
		ref = ref[:45]
	}
	return "constellation-" + ref
}
// pk wraps the secure boot platform key as a base64-encoded X509 file buffer.
func pk(sbDatabase *secureboot.Database) *computepb.FileContentBuffer {
	return &computepb.FileContentBuffer{
		Content:  toPtr(base64.StdEncoding.EncodeToString(sbDatabase.PK)),
		FileType: toPtr("X509"),
	}
}
// keks wraps every key exchange key (KEK) of the secure boot database
// as a base64-encoded X509 file content buffer for the GCP API.
func keks(sbDatabase *secureboot.Database) []*computepb.FileContentBuffer {
	buffers := make([]*computepb.FileContentBuffer, 0, len(sbDatabase.Keks))
	for _, key := range sbDatabase.Keks {
		buffers = append(buffers, &computepb.FileContentBuffer{
			Content:  toPtr(base64.StdEncoding.EncodeToString(key)),
			FileType: toPtr("X509"),
		})
	}
	return buffers
}
// dbs wraps every signature database (db) entry of the secure boot database
// as a base64-encoded X509 file content buffer for the GCP API.
func dbs(sbDatabase *secureboot.Database) []*computepb.FileContentBuffer {
	buffers := make([]*computepb.FileContentBuffer, 0, len(sbDatabase.DBs))
	for _, entry := range sbDatabase.DBs {
		buffers = append(buffers, &computepb.FileContentBuffer{
			Content:  toPtr(base64.StdEncoding.EncodeToString(entry)),
			FileType: toPtr("X509"),
		})
	}
	return buffers
}
// imagesAPI is the subset of the GCP Compute Engine images client
// used by the Uploader. It is an interface to allow mocking in tests.
type imagesAPI interface {
	// Get retrieves metadata of an existing image.
	Get(ctx context.Context, req *computepb.GetImageRequest, opts ...gaxv2.CallOption,
	) (*computepb.Image, error)
	// Insert creates a new image and returns the long-running operation.
	Insert(ctx context.Context, req *computepb.InsertImageRequest, opts ...gaxv2.CallOption,
	) (*compute.Operation, error)
	// SetIamPolicy replaces the IAM policy of an image.
	SetIamPolicy(ctx context.Context, req *computepb.SetIamPolicyImageRequest, opts ...gaxv2.CallOption,
	) (*computepb.Policy, error)
	// Delete removes an image and returns the long-running operation.
	Delete(ctx context.Context, req *computepb.DeleteImageRequest, opts ...gaxv2.CallOption,
	) (*compute.Operation, error)
	// Closer releases the underlying client connection.
	io.Closer
}
// bucketAPI is the subset of the GCS bucket handle used by the Uploader.
// It is an interface to allow mocking in tests.
type bucketAPI interface {
	// Attrs fetches the bucket's attributes.
	Attrs(ctx context.Context) (attrs *storage.BucketAttrs, err error)
	// Create creates the bucket in the given project.
	Create(ctx context.Context, projectID string, attrs *storage.BucketAttrs) (err error)
	// Object returns a handle for the named object within the bucket.
	Object(name string) *storage.ObjectHandle
}
// toPtr returns a pointer to a copy of v.
func toPtr[T any](v T) *T {
	p := new(T)
	*p = v
	return p
}

View file

@ -78,7 +78,7 @@ func (a *Uploader) Upload(ctx context.Context, imageInfo versionsapi.ImageInfo)
if err != nil {
return "", err
}
a.log.Debug("Archiving image info to s3://%v/%v", a.bucket, key)
a.log.Debug(fmt.Sprintf("Archiving image info to s3://%v/%v", a.bucket, key))
buf := &bytes.Buffer{}
if err := json.NewEncoder(buf).Encode(imageInfo); err != nil {
return "", err

View file

@ -92,7 +92,7 @@ func (a *Uploader) Upload(ctx context.Context, rawMeasurement, signature io.Read
if err != nil {
return "", "", err
}
a.log.Debug("Archiving image measurements to s3://%v/%v and s3://%v/%v", a.bucket, key, a.bucket, sigKey)
a.log.Debug(fmt.Sprintf("Archiving image measurements to s3://%v/%v and s3://%v/%v", a.bucket, key, a.bucket, sigKey))
if _, err = a.uploadClient.Upload(ctx, &s3.PutObjectInput{
Bucket: &a.bucket,
Key: &key,

View file

@ -13,6 +13,7 @@ import (
_ "embed"
"errors"
"fmt"
"log/slog"
"os"
"os/exec"
"path/filepath"
@ -23,7 +24,6 @@ import (
"github.com/BurntSushi/toml"
"github.com/edgelesssys/constellation/v2/internal/api/versionsapi"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/edgelesssys/constellation/v2/internal/osimage"
)
@ -36,11 +36,11 @@ const timestampFormat = "20060102150405"
type Uploader struct {
uplosiPath string
log *logger.Logger
log *slog.Logger
}
// New creates a new Uploader.
func New(uplosiPath string, log *logger.Logger) *Uploader {
func New(uplosiPath string, log *slog.Logger) *Uploader {
return &Uploader{
uplosiPath: uplosiPath,
log: log,

View file

@ -216,12 +216,12 @@ type Certificate struct {
func newCertificates(certTypeName string, cert []byte, log debugLog) (certs []Certificate, err error) {
newlinesTrimmed := strings.TrimSpace(string(cert))
log.Debug("Decoding PEM certificate: %s", certTypeName)
log.Debug(fmt.Sprintf("Decoding PEM certificate: %s", certTypeName))
i := 1
var rest []byte
var block *pem.Block
for block, rest = pem.Decode([]byte(newlinesTrimmed)); block != nil; block, rest = pem.Decode(rest) {
log.Debug("Parsing PEM block: %d", i)
log.Debug(fmt.Sprintf("Parsing PEM block: %d", i))
if block.Type != "CERTIFICATE" {
return certs, fmt.Errorf("parse %s: expected PEM block type 'CERTIFICATE', got '%s'", certTypeName, block.Type)
}

View file

@ -53,11 +53,11 @@ func (c *Client) CreateCertChainCache(ctx context.Context) (*CachedCerts, error)
case variant.AWSSEVSNP{}:
reportSigner = abi.VlekReportSigner
default:
c.log.Debug("No certificate chain caching possible for attestation variant %s", c.attVariant)
c.log.Debug(fmt.Sprintf("No certificate chain caching possible for attestation variant %s", c.attVariant))
return nil, nil
}
c.log.Debug("Creating %s certificate chain cache", c.attVariant)
c.log.Debug(fmt.Sprintf("Creating %s certificate chain cache", c.attVariant))
ask, ark, err := c.createCertChainCache(ctx, reportSigner)
if err != nil {
return nil, fmt.Errorf("creating %s certificate chain cache: %w", c.attVariant, err)