Mirror of https://github.com/edgelesssys/constellation.git
logging: unify debug log message format (#2997)
commit 75ceeb2de8 (parent d51a6d5744)
48 changed files with 183 additions and 169 deletions
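The hunks below all apply the same convention: debug messages that dumped parsed flags with a single fmt.Sprintf("Parsed flags: %+v", flags) become structured slog key/value pairs ("Using flags", ...), and interpolated values such as file paths, rule names, URLs, and hashes switch from %s/%v to %q so they appear quoted in the log output. Below is a minimal sketch of the pattern, using the standard library log/slog directly instead of the repository's logger wrapper; the relPath and flag values are illustrative, not taken from the diff.

package main

import (
	"fmt"
	"log/slog"
	"os"
)

func main() {
	// Debug-level text logger; the repository wraps slog behind its own
	// logger package, this sketch uses the standard library directly.
	log := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug}))

	relPath := "some dir/BUILD.bazel" // illustrative value, not from the diff

	// Old style: %s/%v leaves the value undelimited in the message.
	log.Debug(fmt.Sprintf("Checking file: %s", relPath))

	// Unified style: %q quotes the value, so spaces and empty strings stay visible.
	log.Debug(fmt.Sprintf("Checking file: %q", relPath))

	// Flag dumps become structured key/value attributes instead of a %+v blob.
	log.Debug("Using flags", "unauthenticated", true, "dryRun", false)
}

With %q, a value containing spaces or an empty string remains unambiguous in the emitted log line, which is presumably the motivation for unifying on quoted formatting.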
@@ -40,7 +40,7 @@ func runCheck(cmd *cobra.Command, _ []string) error {
 		return err
 	}
 	log := logger.NewTextLogger(flags.logLevel)
-	log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
+	log.Debug("Using flags", "mirror", flags.mirror, "mirrorUnauthenticated", flags.mirrorUnauthenticated)
 
 	filesHelper, err := bazelfiles.New()
 	if err != nil {
@@ -89,7 +89,7 @@ func runCheck(cmd *cobra.Command, _ []string) error {
 }
 
 func checkBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorCheck mirrorChecker, bazelFile bazelfiles.BazelFile, log *slog.Logger) (issByFile issues.ByFile, err error) {
-	log.Debug(fmt.Sprintf("Checking file: %s", bazelFile.RelPath))
+	log.Debug(fmt.Sprintf("Checking file: %q", bazelFile.RelPath))
 	issByFile = issues.NewByFile()
 	buildfile, err := fileHelper.LoadFile(bazelFile)
 	if err != nil {
@@ -97,12 +97,12 @@ func checkBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorCh
 	}
 	found := rules.Rules(buildfile, rules.SupportedRules)
 	if len(found) == 0 {
-		log.Debug(fmt.Sprintf("No rules found in file: %s", bazelFile.RelPath))
+		log.Debug(fmt.Sprintf("No rules found in file: %q", bazelFile.RelPath))
 		return issByFile, nil
 	}
-	log.Debug(fmt.Sprintf("Found %d rules in file: %s", len(found), bazelFile.RelPath))
+	log.Debug(fmt.Sprintf("Found %d rules in file: %q", len(found), bazelFile.RelPath))
 	for _, rule := range found {
-		log.Debug(fmt.Sprintf("Checking rule: %s", rule.Name()))
+		log.Debug(fmt.Sprintf("Checking rule: %q", rule.Name()))
 		// check if the rule is a valid pinned dependency rule (has all required attributes)
 		if issues := rules.ValidatePinned(rule); len(issues) > 0 {
 			issByFile.Add(rule.Name(), issues...)

@@ -40,7 +40,7 @@ func runFix(cmd *cobra.Command, _ []string) error {
 		return err
 	}
 	log := logger.NewTextLogger(flags.logLevel)
-	log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
+	log.Debug("Using flags", "unauthenticated", flags.unauthenticated, "dryRun", flags.dryRun)
 
 	fileHelper, err := bazelfiles.New()
 	if err != nil {
@@ -96,10 +96,10 @@ func fixBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorUplo
 	}
 	found := rules.Rules(buildfile, rules.SupportedRules)
 	if len(found) == 0 {
-		log.Debug(fmt.Sprintf("No rules found in file: %s", bazelFile.RelPath))
+		log.Debug(fmt.Sprintf("No rules found in file: %q", bazelFile.RelPath))
 		return iss, nil
 	}
-	log.Debug(fmt.Sprintf("Found %d rules in file: %s", len(found), bazelFile.RelPath))
+	log.Debug(fmt.Sprintf("Found %d rules in file: %q", len(found), bazelFile.RelPath))
 	for _, rule := range found {
 		changedRule, ruleIssues := fixRule(ctx, mirrorUpload, rule, log)
 		if len(ruleIssues) > 0 {
@@ -113,7 +113,7 @@ func fixBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorUplo
 		return iss, nil
 	}
 	if !changed {
-		log.Debug(fmt.Sprintf("No changes to file: %s", bazelFile.RelPath))
+		log.Debug(fmt.Sprintf("No changes to file: %q", bazelFile.RelPath))
 		return iss, nil
 	}
 	if dryRun {
@@ -142,12 +142,12 @@ func learnHashForRule(ctx context.Context, mirrorUpload mirrorUploader, rule *bu
 		return err
 	}
 	rules.SetHash(rule, learnedHash)
-	log.Debug(fmt.Sprintf("Learned hash for rule %s: %s", rule.Name(), learnedHash))
+	log.Debug(fmt.Sprintf("Learned hash for rule %q: %q", rule.Name(), learnedHash))
 	return nil
 }
 
 func fixRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.Rule, log *slog.Logger) (changed bool, iss []error) {
-	log.Debug(fmt.Sprintf("Fixing rule: %s", rule.Name()))
+	log.Debug(fmt.Sprintf("Fixing rule: %q", rule.Name()))
 
 	// try to learn the hash
 	if hash, err := rules.GetHash(rule); err != nil || hash == "" {

@@ -95,10 +95,10 @@ func (m *Maintainer) Mirror(ctx context.Context, hash string, urls []string) err
 	}
 
 	for _, url := range urls {
-		m.log.Debug(fmt.Sprintf("Mirroring file with hash %v from %q", hash, url))
+		m.log.Debug(fmt.Sprintf("Mirroring file with hash %q from %q", hash, url))
 		body, err := m.downloadFromUpstream(ctx, url)
 		if err != nil {
-			m.log.Debug(fmt.Sprintf("Failed to download file from %q: %v", url, err))
+			m.log.Debug(fmt.Sprintf("Failed to download file from %q: %q", url, err))
 			continue
 		}
 		defer body.Close()
@@ -129,13 +129,13 @@ func (m *Maintainer) Learn(ctx context.Context, urls []string) (string, error) {
 		m.log.Debug(fmt.Sprintf("Learning new hash from %q", url))
 		body, err := m.downloadFromUpstream(ctx, url)
 		if err != nil {
-			m.log.Debug(fmt.Sprintf("Failed to download file from %q: %v", url, err))
+			m.log.Debug(fmt.Sprintf("Failed to download file from %q: %q", url, err))
 			continue
 		}
 		defer body.Close()
 		streamedHash := sha256.New()
 		if _, err := io.Copy(streamedHash, body); err != nil {
-			m.log.Debug(fmt.Sprintf("Failed to stream file from %q: %v", url, err))
+			m.log.Debug(fmt.Sprintf("Failed to stream file from %q: %q", url, err))
 		}
 		learnedHash := hex.EncodeToString(streamedHash.Sum(nil))
 		m.log.Debug(fmt.Sprintf("File successfully downloaded from %q with %q", url, learnedHash))
@@ -146,7 +146,7 @@ func (m *Maintainer) Learn(ctx context.Context, urls []string) (string, error) {
 
 // Check checks if a file is present and has the correct hash in the CAS mirror.
 func (m *Maintainer) Check(ctx context.Context, expectedHash string) error {
-	m.log.Debug(fmt.Sprintf("Checking consistency of object with hash %v", expectedHash))
+	m.log.Debug(fmt.Sprintf("Checking consistency of object with hash %q", expectedHash))
 	if m.unauthenticated {
 		return m.checkUnauthenticated(ctx, expectedHash)
 	}
@@ -157,7 +157,7 @@ func (m *Maintainer) Check(ctx context.Context, expectedHash string) error {
 // It uses the authenticated CAS s3 endpoint to download the file metadata.
 func (m *Maintainer) checkAuthenticated(ctx context.Context, expectedHash string) error {
 	key := path.Join(keyBase, expectedHash)
-	m.log.Debug(fmt.Sprintf("Check: s3 getObjectAttributes {Bucket: %v, Key: %v}", m.bucket, key))
+	m.log.Debug(fmt.Sprintf("Check: s3 getObjectAttributes {Bucket: %q, Key: %q}", m.bucket, key))
 	attributes, err := m.objectStorageClient.GetObjectAttributes(ctx, &s3.GetObjectAttributesInput{
 		Bucket: &m.bucket,
 		Key:    &key,
@@ -174,7 +174,7 @@ func (m *Maintainer) checkAuthenticated(ctx context.Context, expectedHash string
 		// checksums are not guaranteed to be present
 		// and if present, they are only meaningful for single part objects
 		// fallback if checksum cannot be verified from attributes
-		m.log.Debug(fmt.Sprintf("S3 object attributes cannot be used to verify key %v. Falling back to download.", key))
+		m.log.Debug(fmt.Sprintf("S3 object attributes cannot be used to verify key %q. Falling back to download.", key))
 		return m.checkUnauthenticated(ctx, expectedHash)
 	}
 
@@ -192,7 +192,7 @@ func (m *Maintainer) checkUnauthenticated(ctx context.Context, expectedHash stri
 	if err != nil {
 		return err
 	}
-	m.log.Debug(fmt.Sprintf("Check: http get {Url: %v}", pubURL))
+	m.log.Debug(fmt.Sprintf("Check: http get {Url: %q}", pubURL))
 	req, err := http.NewRequestWithContext(ctx, http.MethodGet, pubURL, http.NoBody)
 	if err != nil {
 		return err
@@ -221,10 +221,10 @@ func (m *Maintainer) put(ctx context.Context, hash string, data io.Reader) error
 
 	key := path.Join(keyBase, hash)
 	if m.dryRun {
-		m.log.Debug(fmt.Sprintf("DryRun: s3 put object {Bucket: %v, Key: %v}", m.bucket, key))
+		m.log.Debug(fmt.Sprintf("DryRun: s3 put object {Bucket: %q, Key: %q}", m.bucket, key))
 		return nil
 	}
-	m.log.Debug(fmt.Sprintf("Uploading object with hash %v to s3://%v/%v", hash, m.bucket, key))
+	m.log.Debug(fmt.Sprintf("Uploading object with hash %q to \"s3://%s/%s\"", hash, m.bucket, key))
 	_, err := m.uploadClient.Upload(ctx, &s3.PutObjectInput{
 		Bucket: &m.bucket,
 		Key:    &key,

@@ -40,7 +40,7 @@ func runUpgrade(cmd *cobra.Command, _ []string) error {
 		return err
 	}
 	log := logger.NewTextLogger(flags.logLevel)
-	log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
+	log.Debug("Using flags", "unauthenticated", flags.unauthenticated, "dryRun", flags.dryRun)
 
 	fileHelper, err := bazelfiles.New()
 	if err != nil {
@@ -96,10 +96,10 @@ func upgradeBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirror
 	}
 	found := rules.Rules(buildfile, rules.SupportedRules)
 	if len(found) == 0 {
-		log.Debug(fmt.Sprintf("No rules found in file: %s", bazelFile.RelPath))
+		log.Debug(fmt.Sprintf("No rules found in file: %q", bazelFile.RelPath))
 		return iss, nil
 	}
-	log.Debug(fmt.Sprintf("Found %d rules in file: %s", len(found), bazelFile.RelPath))
+	log.Debug(fmt.Sprintf("Found %d rules in file: %q", len(found), bazelFile.RelPath))
 	for _, rule := range found {
 		changedRule, ruleIssues := upgradeRule(ctx, mirrorUpload, rule, log)
 		if len(ruleIssues) > 0 {
@@ -113,7 +113,7 @@ func upgradeBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirror
 		return iss, nil
 	}
 	if !changed {
-		log.Debug(fmt.Sprintf("No changes to file: %s", bazelFile.RelPath))
+		log.Debug(fmt.Sprintf("No changes to file: %q", bazelFile.RelPath))
 		return iss, nil
 	}
 	if dryRun {
@@ -133,7 +133,7 @@ func upgradeBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirror
 }
 
 func upgradeRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.Rule, log *slog.Logger) (changed bool, iss []error) {
-	log.Debug(fmt.Sprintf("Upgrading rule: %s", rule.Name()))
+	log.Debug(fmt.Sprintf("Upgrading rule: %q", rule.Name()))
 
 	upstreamURLs, err := rules.UpstreamURLs(rule)
 	if errors.Is(err, rules.ErrNoUpstreamURL) {

@@ -45,14 +45,14 @@ func runCodegen(cmd *cobra.Command, _ []string) error {
 		return err
 	}
 	log := logger.NewTextLogger(flags.logLevel)
-	log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
+	log.Debug("Using flags", "identifier", flags.identifier, "imageRepoTag", flags.imageRepoTag, "ociPath", flags.ociPath, "pkg", flags.pkg)
 
 	registry, prefix, name, tag, err := splitRepoTag(flags.imageRepoTag)
 	if err != nil {
 		return fmt.Errorf("splitting OCI image reference %q: %w", flags.imageRepoTag, err)
 	}
 
-	log.Debug(fmt.Sprintf("Generating Go code for OCI image %s.", name))
+	log.Debug(fmt.Sprintf("Generating Go code for OCI image %q.", name))
 
 	ociIndexPath := filepath.Join(flags.ociPath, "index.json")
 	index, err := os.Open(ociIndexPath)
@@ -78,7 +78,7 @@ func runCodegen(cmd *cobra.Command, _ []string) error {
 		return err
 	}
 
-	log.Debug(fmt.Sprintf("OCI image digest: %s", digest))
+	log.Debug(fmt.Sprintf("OCI image digest: %q", digest))
 
 	if err := inject.Render(out, inject.PinningValues{
 		Package: flags.pkg,

@@ -36,7 +36,7 @@ func runMerge(cmd *cobra.Command, _ []string) error {
 		return err
 	}
 	log := logger.NewTextLogger(flags.logLevel)
-	log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
+	log.Debug("Using flags", "inputs", flags.inputs, "output", flags.output, "logLevel", flags.logLevel)
 
 	log.Debug(fmt.Sprintf("Merging sum file from %q into %q.", flags.inputs, flags.output))
 

@@ -42,14 +42,14 @@ func runSum(cmd *cobra.Command, _ []string) error {
 		return err
 	}
 	log := logger.NewTextLogger(flags.logLevel)
-	log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
+	log.Debug("Using flags", "imageRepoTag", flags.imageRepoTag, "ociPath", flags.ociPath)
 
 	registry, prefix, name, tag, err := splitRepoTag(flags.imageRepoTag)
 	if err != nil {
 		return fmt.Errorf("splitting repo tag: %w", err)
 	}
 
-	log.Debug(fmt.Sprintf("Generating sum file for OCI image %s.", name))
+	log.Debug(fmt.Sprintf("Generating sum file for OCI image %q.", name))
 
 	ociIndexPath := filepath.Join(flags.ociPath, "index.json")
 	index, err := os.Open(ociIndexPath)
@@ -75,7 +75,7 @@ func runSum(cmd *cobra.Command, _ []string) error {
 		return fmt.Errorf("extracting OCI image digest: %w", err)
 	}
 
-	log.Debug(fmt.Sprintf("OCI image digest: %s", digest))
+	log.Debug(fmt.Sprintf("OCI image digest: %q", digest))
 
 	refs := []sums.PinnedImageReference{
 		{