fixed final issues

fixed format strings for slog
This commit is contained in:
miampf 2024-01-15 14:22:01 +01:00
parent 69556e84d9
commit 941889ade7
No known key found for this signature in database
GPG key ID: 376EAC0E5307A669
58 changed files with 191 additions and 2210 deletions
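The pattern repeated across these files is the same: the log/slog methods Info, Debug, Warn, and Error take a message plus key-value attributes, not printf-style verbs, so every old format string is now rendered up front with fmt.Sprintf. A minimal sketch of why the old calls were broken (the logger setup and the path value are illustrative, not taken from the repository):

```go
package main

import (
	"fmt"
	"log/slog"
	"os"
)

func main() {
	log := slog.New(slog.NewTextHandler(os.Stderr, nil))
	path := "bazel/deps.bzl" // hypothetical value

	// Broken: slog treats the trailing argument as a key-value attribute,
	// so %s is never substituted and the record gains a "!BADKEY" attribute.
	log.Info("Saving updated file: %s", path)

	// Fix applied in this commit: render the message before handing it to slog.
	log.Info(fmt.Sprintf("Saving updated file: %s", path))

	// Idiomatic slog alternative: keep the message constant and attach attributes.
	log.Info("saving updated file", "path", path)
}
```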

View file

@@ -121,7 +121,7 @@ func fixBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorUplo
if err != nil {
return iss, err
}
log.Info("Dry run: would save updated file %s with diff:\n%s", bazelFile.RelPath, diff)
log.Info(fmt.Sprintf("Dry run: would save updated file %s with diff:\n%s", bazelFile.RelPath, diff))
return iss, nil
}
log.Info(fmt.Sprintf("Saving updated file: %s", bazelFile.RelPath))
@@ -142,7 +142,7 @@ func learnHashForRule(ctx context.Context, mirrorUpload mirrorUploader, rule *bu
return err
}
rules.SetHash(rule, learnedHash)
log.Debug("Learned hash for rule %s: %s", rule.Name(), learnedHash)
log.Debug(fmt.Sprintf("Learned hash for rule %s: %s", rule.Name(), learnedHash))
return nil
}
@@ -183,14 +183,14 @@ func fixRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.Rule,
}
if checkErr := mirrorUpload.Check(ctx, expectedHash); checkErr != nil {
log.Info("Artifact %s with hash %s is not yet mirrored. Uploading...", rule.Name(), expectedHash)
log.Info(fmt.Sprintf("Artifact %s with hash %s is not yet mirrored. Uploading...", rule.Name(), expectedHash))
if uploadErr := mirrorUpload.Mirror(ctx, expectedHash, rules.GetURLs(rule)); uploadErr != nil {
// don't try to fix the rule if the upload failed
iss = append(iss, uploadErr)
return changed, iss
}
} else {
log.Info("Artifact %s with hash %s was already uploaded before. Adding to rule...", rule.Name(), expectedHash)
log.Info(fmt.Sprintf("Artifact %s with hash %s was already uploaded before. Adding to rule...", rule.Name(), expectedHash))
}
// now the artifact is mirrored (if it wasn't already) and we can fix the rule

View file

@@ -95,17 +95,17 @@ func (m *Maintainer) Mirror(ctx context.Context, hash string, urls []string) err
}
for _, url := range urls {
m.log.Debug("Mirroring file with hash %v from %q", hash, url)
m.log.Debug(fmt.Sprintf("Mirroring file with hash %v from %q", hash, url))
body, err := m.downloadFromUpstream(ctx, url)
if err != nil {
m.log.Debug("Failed to download file from %q: %v", url, err)
m.log.Debug(fmt.Sprintf("Failed to download file from %q: %v", url, err))
continue
}
defer body.Close()
streamedHash := sha256.New()
tee := io.TeeReader(body, streamedHash)
if err := m.put(ctx, hash, tee); err != nil {
m.log.Warn("Failed to stream file from upstream %q to mirror: %v.. Trying next url.", url, err)
m.log.Warn(fmt.Sprintf("Failed to stream file from upstream %q to mirror: %v.. Trying next url.", url, err))
continue
}
actualHash := hex.EncodeToString(streamedHash.Sum(nil))
@@ -117,7 +117,7 @@ func (m *Maintainer) Mirror(ctx context.Context, hash string, urls []string) err
if err != nil {
return err
}
m.log.Debug("File uploaded successfully to mirror from %q as %q", url, pubURL)
m.log.Debug(fmt.Sprintf("File uploaded successfully to mirror from %q as %q", url, pubURL))
return nil
}
return fmt.Errorf("failed to download / reupload file with hash %v from any of the urls: %v", hash, urls)
@@ -129,16 +129,16 @@ func (m *Maintainer) Learn(ctx context.Context, urls []string) (string, error) {
m.log.Debug(fmt.Sprintf("Learning new hash from %q", url))
body, err := m.downloadFromUpstream(ctx, url)
if err != nil {
m.log.Debug("Failed to download file from %q: %v", url, err)
m.log.Debug(fmt.Sprintf("Failed to download file from %q: %v", url, err))
continue
}
defer body.Close()
streamedHash := sha256.New()
if _, err := io.Copy(streamedHash, body); err != nil {
m.log.Debug("Failed to stream file from %q: %v", url, err)
m.log.Debug(fmt.Sprintf("Failed to stream file from %q: %v", url, err))
}
learnedHash := hex.EncodeToString(streamedHash.Sum(nil))
m.log.Debug("File successfully downloaded from %q with %q", url, learnedHash)
m.log.Debug(fmt.Sprintf("File successfully downloaded from %q with %q", url, learnedHash))
return learnedHash, nil
}
return "", fmt.Errorf("failed to download file / learn hash from any of the urls: %v", urls)
@@ -157,7 +157,7 @@ func (m *Maintainer) Check(ctx context.Context, expectedHash string) error {
// It uses the authenticated CAS s3 endpoint to download the file metadata.
func (m *Maintainer) checkAuthenticated(ctx context.Context, expectedHash string) error {
key := path.Join(keyBase, expectedHash)
m.log.Debug("Check: s3 getObjectAttributes {Bucket: %v, Key: %v}", m.bucket, key)
m.log.Debug(fmt.Sprintf("Check: s3 getObjectAttributes {Bucket: %v, Key: %v}", m.bucket, key))
attributes, err := m.objectStorageClient.GetObjectAttributes(ctx, &s3.GetObjectAttributesInput{
Bucket: &m.bucket,
Key: &key,
@@ -221,10 +221,10 @@ func (m *Maintainer) put(ctx context.Context, hash string, data io.Reader) error
key := path.Join(keyBase, hash)
if m.dryRun {
m.log.Debug("DryRun: s3 put object {Bucket: %v, Key: %v}", m.bucket, key)
m.log.Debug(fmt.Sprintf("DryRun: s3 put object {Bucket: %v, Key: %v}", m.bucket, key))
return nil
}
m.log.Debug(fmt.Sprintf("Uploading object with hash %v to s3://%v/%v", hash, m.bucket, key))
m.log.Debug(fmt.Sprintf("Uploading object with hash %v to s3://%v/%v", hash, m.bucket, key))
_, err := m.uploadClient.Upload(ctx, &s3.PutObjectInput{
Bucket: &m.bucket,
Key: &key,
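The Mirror hunks above keep their core logic unchanged: each upstream download is streamed through a SHA-256 hash via io.TeeReader while it is uploaded, and the digest is compared afterwards. A self-contained sketch of that tee-and-hash pattern, with strings.Reader and io.Discard standing in for the upstream body and the S3 uploader (both stand-ins are assumptions, not the repository's types):

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

func main() {
	body := strings.NewReader("artifact bytes") // stand-in for the upstream HTTP body

	// Everything the "uploader" reads is also fed into the hash.
	h := sha256.New()
	tee := io.TeeReader(body, h)

	// Stand-in for streaming the body to the mirror bucket.
	if _, err := io.Copy(io.Discard, tee); err != nil {
		fmt.Println("upload failed:", err)
		return
	}

	// Only after the full stream has passed through is the digest known.
	fmt.Println("uploaded with sha256:", hex.EncodeToString(h.Sum(nil)))
}
```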

View file

@@ -121,7 +121,7 @@ func upgradeBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirror
if err != nil {
return iss, err
}
log.Info("Dry run: would save updated file %s with diff:\n%s", bazelFile.RelPath, diff)
log.Info(fmt.Sprintf("Dry run: would save updated file %s with diff:\n%s", bazelFile.RelPath, diff))
return iss, nil
}
log.Info(fmt.Sprintf("Saving updated file: %s", bazelFile.RelPath))

View file

@@ -8,14 +8,15 @@ SPDX-License-Identifier: AGPL-3.0-only
package main
import (
"context"
"flag"
"log/slog"
"os"
"context"
"flag"
"fmt"
"log/slog"
"os"
"github.com/edgelesssys/constellation/v2/internal/api/versionsapi"
"github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/versions"
"github.com/edgelesssys/constellation/v2/internal/api/versionsapi"
"github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/versions"
)
var (
@@ -55,18 +56,18 @@ func main() {
c, cclose, err := versionsapi.NewClient(ctx, "eu-central-1", "cdn-constellation-backend", constants.CDNDefaultDistributionID, false, log)
if err != nil {
log.Error("creating s3 client: %w", err)
log.Error(fmt.Sprintf("creating s3 client: %s", err))
os.Exit(1)
}
defer func() {
if err := cclose(ctx); err != nil {
log.Error("invalidating cache: %w", err)
log.Error(fmt.Sprintf("invalidating cache: %s", err))
os.Exit(1)
}
}()
if err := c.UpdateCLIInfo(ctx, cliInfo); err != nil {
log.Error("updating cli info: %w", err)
log.Error(fmt.Sprintf("updating cli info: %s", err))
os.Exit(1)
}
}
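Besides wrapping the messages in fmt.Sprintf, this hunk also swaps %w for %s: %w is only understood by fmt.Errorf, which uses it to wrap the error, while fmt.Sprintf treats it as an unknown verb and emits a %!w(...) artifact instead of the error text. A small illustration with a made-up error value:

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	err := errors.New("connection refused") // hypothetical error

	// %w belongs in fmt.Errorf, where it wraps err for errors.Is / errors.As.
	wrapped := fmt.Errorf("creating s3 client: %w", err)
	fmt.Println(wrapped) // creating s3 client: connection refused

	// In fmt.Sprintf, %w is an unknown verb; the output contains %!w(...) noise.
	fmt.Println(fmt.Sprintf("creating s3 client: %w", err))

	// Hence %s (or %v) once the string is pre-rendered for slog.
	fmt.Println(fmt.Sprintf("creating s3 client: %s", err))
}
```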

View file

@@ -55,7 +55,7 @@ func (s *Server) ListenAndServe(port string) error {
return err
}
s.log.Info("Starting QEMU metadata API on %s", lis.Addr())
s.log.Info(fmt.Sprintf("Starting QEMU metadata API on %s", lis.Addr()))
return server.Serve(lis)
}