deps: convert zap to slog (#2825)

This commit is contained in:
miampf 2024-02-08 14:20:01 +00:00 committed by GitHub
parent 3765cb0762
commit 54cce77bab
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
182 changed files with 1474 additions and 1509 deletions

View file

@ -18,7 +18,6 @@ go_library(
"//internal/logger",
"@com_github_bazelbuild_buildtools//build",
"@com_github_spf13_cobra//:cobra",
"@org_uber_go_zap//zapcore",
],
)

View file

@ -9,6 +9,8 @@ package main
import (
"context"
"errors"
"fmt"
"log/slog"
"github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/bazelfiles"
"github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/issues"
@ -16,7 +18,6 @@ import (
"github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/rules"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/spf13/cobra"
"go.uber.org/zap/zapcore"
)
func newCheckCmd() *cobra.Command {
@ -38,15 +39,15 @@ func runCheck(cmd *cobra.Command, _ []string) error {
if err != nil {
return err
}
log := logger.New(logger.PlainLog, flags.logLevel)
log.Debugf("Parsed flags: %+v", flags)
log := logger.NewTextLogger(flags.logLevel)
log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
filesHelper, err := bazelfiles.New()
if err != nil {
return err
}
log.Debugf("Searching for Bazel files in the current WORKSPACE and all subdirectories...")
log.Debug("Searching for Bazel files in the current WORKSPACE and all subdirectories...")
bazelFiles, err := filesHelper.FindFiles()
if err != nil {
return err
@ -55,10 +56,10 @@ func runCheck(cmd *cobra.Command, _ []string) error {
var mirrorCheck mirrorChecker
switch {
case flags.mirrorUnauthenticated:
log.Debugf("Checking consistency of all referenced CAS objects without authentication.")
log.Debug("Checking consistency of all referenced CAS objects without authentication.")
mirrorCheck = mirror.NewUnauthenticated(flags.mirrorBaseURL, mirror.Run, log)
case flags.mirror:
log.Debugf("Checking consistency of all referenced CAS objects using AWS S3.")
log.Debug("Checking consistency of all referenced CAS objects using AWS S3.")
mirrorCheck, err = mirror.New(cmd.Context(), flags.region, flags.bucket, flags.mirrorBaseURL, mirror.Run, log)
if err != nil {
return err
@ -78,17 +79,17 @@ func runCheck(cmd *cobra.Command, _ []string) error {
}
}
if len(iss) > 0 {
log.Infof("Found issues in rules")
log.Info("Found issues in rules")
iss.Report(cmd.OutOrStdout())
return errors.New("found issues in rules")
}
log.Infof("No issues found 🦭")
log.Info("No issues found 🦭")
return nil
}
func checkBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorCheck mirrorChecker, bazelFile bazelfiles.BazelFile, log *logger.Logger) (issByFile issues.ByFile, err error) {
log.Debugf("Checking file: %s", bazelFile.RelPath)
func checkBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorCheck mirrorChecker, bazelFile bazelfiles.BazelFile, log *slog.Logger) (issByFile issues.ByFile, err error) {
log.Debug(fmt.Sprintf("Checking file: %s", bazelFile.RelPath))
issByFile = issues.NewByFile()
buildfile, err := fileHelper.LoadFile(bazelFile)
if err != nil {
@ -96,12 +97,12 @@ func checkBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorCh
}
found := rules.Rules(buildfile, rules.SupportedRules)
if len(found) == 0 {
log.Debugf("No rules found in file: %s", bazelFile.RelPath)
log.Debug(fmt.Sprintf("No rules found in file: %s", bazelFile.RelPath))
return issByFile, nil
}
log.Debugf("Found %d rules in file: %s", len(found), bazelFile.RelPath)
log.Debug(fmt.Sprintf("Found %d rules in file: %s", len(found), bazelFile.RelPath))
for _, rule := range found {
log.Debugf("Checking rule: %s", rule.Name())
log.Debug(fmt.Sprintf("Checking rule: %s", rule.Name()))
// check if the rule is a valid pinned dependency rule (has all required attributes)
if issues := rules.ValidatePinned(rule); len(issues) > 0 {
issByFile.Add(rule.Name(), issues...)
@ -130,7 +131,7 @@ type checkFlags struct {
region string
bucket string
mirrorBaseURL string
logLevel zapcore.Level
logLevel slog.Level
}
func parseCheckFlags(cmd *cobra.Command) (checkFlags, error) {
@ -146,9 +147,9 @@ func parseCheckFlags(cmd *cobra.Command) (checkFlags, error) {
if err != nil {
return checkFlags{}, err
}
logLevel := zapcore.InfoLevel
logLevel := slog.LevelInfo
if verbose {
logLevel = zapcore.DebugLevel
logLevel = slog.LevelDebug
}
region, err := cmd.Flags().GetString("region")
if err != nil {

View file

@ -9,6 +9,8 @@ package main
import (
"context"
"errors"
"fmt"
"log/slog"
"github.com/bazelbuild/buildtools/build"
"github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/bazelfiles"
@ -17,7 +19,6 @@ import (
"github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/rules"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/spf13/cobra"
"go.uber.org/zap/zapcore"
)
func newFixCmd() *cobra.Command {
@ -38,15 +39,15 @@ func runFix(cmd *cobra.Command, _ []string) error {
if err != nil {
return err
}
log := logger.New(logger.PlainLog, flags.logLevel)
log.Debugf("Parsed flags: %+v", flags)
log := logger.NewTextLogger(flags.logLevel)
log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
fileHelper, err := bazelfiles.New()
if err != nil {
return err
}
log.Debugf("Searching for Bazel files in the current WORKSPACE and all subdirectories...")
log.Debug("Searching for Bazel files in the current WORKSPACE and all subdirectories...")
bazelFiles, err := fileHelper.FindFiles()
if err != nil {
return err
@ -55,10 +56,10 @@ func runFix(cmd *cobra.Command, _ []string) error {
var mirrorUpload mirrorUploader
switch {
case flags.unauthenticated:
log.Warnf("Fixing rules without authentication for AWS S3. If artifacts are not yet mirrored, this will fail.")
log.Warn("Fixing rules without authentication for AWS S3. If artifacts are not yet mirrored, this will fail.")
mirrorUpload = mirror.NewUnauthenticated(flags.mirrorBaseURL, flags.dryRun, log)
default:
log.Debugf("Fixing rules with authentication for AWS S3.")
log.Debug("Fixing rules with authentication for AWS S3.")
mirrorUpload, err = mirror.New(cmd.Context(), flags.region, flags.bucket, flags.mirrorBaseURL, flags.dryRun, log)
if err != nil {
return err
@ -76,29 +77,29 @@ func runFix(cmd *cobra.Command, _ []string) error {
}
}
if len(issues) > 0 {
log.Warnf("Found %d unfixable issues in rules", len(issues))
log.Warn(fmt.Sprintf("Found %d unfixable issues in rules", len(issues)))
issues.Report(cmd.OutOrStdout())
return errors.New("found issues in rules")
}
log.Infof("No unfixable issues found")
log.Info("No unfixable issues found")
return nil
}
func fixBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorUpload mirrorUploader, bazelFile bazelfiles.BazelFile, dryRun bool, log *logger.Logger) (iss issues.ByFile, err error) {
func fixBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorUpload mirrorUploader, bazelFile bazelfiles.BazelFile, dryRun bool, log *slog.Logger) (iss issues.ByFile, err error) {
iss = issues.NewByFile()
var changed bool // true if any rule in this file was changed
log.Infof("Checking file: %s", bazelFile.RelPath)
log.Info(fmt.Sprintf("Checking file: %s", bazelFile.RelPath))
buildfile, err := fileHelper.LoadFile(bazelFile)
if err != nil {
return iss, err
}
found := rules.Rules(buildfile, rules.SupportedRules)
if len(found) == 0 {
log.Debugf("No rules found in file: %s", bazelFile.RelPath)
log.Debug(fmt.Sprintf("No rules found in file: %s", bazelFile.RelPath))
return iss, nil
}
log.Debugf("Found %d rules in file: %s", len(found), bazelFile.RelPath)
log.Debug(fmt.Sprintf("Found %d rules in file: %s", len(found), bazelFile.RelPath))
for _, rule := range found {
changedRule, ruleIssues := fixRule(ctx, mirrorUpload, rule, log)
if len(ruleIssues) > 0 {
@ -108,11 +109,11 @@ func fixBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorUplo
}
if len(iss) > 0 {
log.Warnf("File %s has issues. Not saving!", bazelFile.RelPath)
log.Warn(fmt.Sprintf("File %s has issues. Not saving!", bazelFile.RelPath))
return iss, nil
}
if !changed {
log.Debugf("No changes to file: %s", bazelFile.RelPath)
log.Debug(fmt.Sprintf("No changes to file: %s", bazelFile.RelPath))
return iss, nil
}
if dryRun {
@ -120,10 +121,10 @@ func fixBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorUplo
if err != nil {
return iss, err
}
log.Infof("Dry run: would save updated file %s with diff:\n%s", bazelFile.RelPath, diff)
log.Info(fmt.Sprintf("Dry run: would save updated file %s with diff:\n%s", bazelFile.RelPath, diff))
return iss, nil
}
log.Infof("Saving updated file: %s", bazelFile.RelPath)
log.Info(fmt.Sprintf("Saving updated file: %s", bazelFile.RelPath))
if err := fileHelper.WriteFile(bazelFile, buildfile); err != nil {
return iss, err
}
@ -131,7 +132,7 @@ func fixBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorUplo
return iss, nil
}
func learnHashForRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.Rule, log *logger.Logger) error {
func learnHashForRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.Rule, log *slog.Logger) error {
upstreamURLs, err := rules.UpstreamURLs(rule)
if err != nil {
return err
@ -141,12 +142,12 @@ func learnHashForRule(ctx context.Context, mirrorUpload mirrorUploader, rule *bu
return err
}
rules.SetHash(rule, learnedHash)
log.Debugf("Learned hash for rule %s: %s", rule.Name(), learnedHash)
log.Debug(fmt.Sprintf("Learned hash for rule %s: %s", rule.Name(), learnedHash))
return nil
}
func fixRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.Rule, log *logger.Logger) (changed bool, iss []error) {
log.Debugf("Fixing rule: %s", rule.Name())
func fixRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.Rule, log *slog.Logger) (changed bool, iss []error) {
log.Debug(fmt.Sprintf("Fixing rule: %s", rule.Name()))
// try to learn the hash
if hash, err := rules.GetHash(rule); err != nil || hash == "" {
@ -182,14 +183,14 @@ func fixRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.Rule,
}
if checkErr := mirrorUpload.Check(ctx, expectedHash); checkErr != nil {
log.Infof("Artifact %s with hash %s is not yet mirrored. Uploading...", rule.Name(), expectedHash)
log.Info(fmt.Sprintf("Artifact %s with hash %s is not yet mirrored. Uploading...", rule.Name(), expectedHash))
if uploadErr := mirrorUpload.Mirror(ctx, expectedHash, rules.GetURLs(rule)); uploadErr != nil {
// don't try to fix the rule if the upload failed
iss = append(iss, uploadErr)
return changed, iss
}
} else {
log.Infof("Artifact %s with hash %s was already uploaded before. Adding to rule...", rule.Name(), expectedHash)
log.Info(fmt.Sprintf("Artifact %s with hash %s was already uploaded before. Adding to rule...", rule.Name(), expectedHash))
}
// now the artifact is mirrored (if it wasn't already) and we can fix the rule
@ -211,7 +212,7 @@ type fixFlags struct {
region string
bucket string
mirrorBaseURL string
logLevel zapcore.Level
logLevel slog.Level
}
func parseFixFlags(cmd *cobra.Command) (fixFlags, error) {
@ -227,9 +228,9 @@ func parseFixFlags(cmd *cobra.Command) (fixFlags, error) {
if err != nil {
return fixFlags{}, err
}
logLevel := zapcore.InfoLevel
logLevel := slog.LevelInfo
if verbose {
logLevel = zapcore.DebugLevel
logLevel = slog.LevelDebug
}
region, err := cmd.Flags().GetString("region")
if err != nil {

View file

@ -7,7 +7,6 @@ go_library(
importpath = "github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/mirror",
visibility = ["//hack/bazel-deps-mirror:__subpackages__"],
deps = [
"//internal/logger",
"@com_github_aws_aws_sdk_go_v2_config//:config",
"@com_github_aws_aws_sdk_go_v2_feature_s3_manager//:manager",
"@com_github_aws_aws_sdk_go_v2_service_s3//:s3",

View file

@ -15,6 +15,7 @@ import (
"errors"
"fmt"
"io"
"log/slog"
"net/http"
"net/url"
"path"
@ -23,7 +24,6 @@ import (
s3manager "github.com/aws/aws-sdk-go-v2/feature/s3/manager"
"github.com/aws/aws-sdk-go-v2/service/s3"
s3types "github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/edgelesssys/constellation/v2/internal/logger"
)
// Maintainer can upload and download files to and from a CAS mirror.
@ -39,11 +39,11 @@ type Maintainer struct {
unauthenticated bool
dryRun bool
log *logger.Logger
log *slog.Logger
}
// NewUnauthenticated creates a new Maintainer that does not require authentication and can only download files from a CAS mirror.
func NewUnauthenticated(mirrorBaseURL string, dryRun bool, log *logger.Logger) *Maintainer {
func NewUnauthenticated(mirrorBaseURL string, dryRun bool, log *slog.Logger) *Maintainer {
return &Maintainer{
httpClient: http.DefaultClient,
mirrorBaseURL: mirrorBaseURL,
@ -54,7 +54,7 @@ func NewUnauthenticated(mirrorBaseURL string, dryRun bool, log *logger.Logger) *
}
// New creates a new Maintainer that can upload and download files to and from a CAS mirror.
func New(ctx context.Context, region, bucket, mirrorBaseURL string, dryRun bool, log *logger.Logger) (*Maintainer, error) {
func New(ctx context.Context, region, bucket, mirrorBaseURL string, dryRun bool, log *slog.Logger) (*Maintainer, error) {
cfg, err := awsconfig.LoadDefaultConfig(ctx, awsconfig.WithRegion(region))
if err != nil {
return nil, err
@ -95,17 +95,17 @@ func (m *Maintainer) Mirror(ctx context.Context, hash string, urls []string) err
}
for _, url := range urls {
m.log.Debugf("Mirroring file with hash %v from %q", hash, url)
m.log.Debug(fmt.Sprintf("Mirroring file with hash %v from %q", hash, url))
body, err := m.downloadFromUpstream(ctx, url)
if err != nil {
m.log.Debugf("Failed to download file from %q: %v", url, err)
m.log.Debug(fmt.Sprintf("Failed to download file from %q: %v", url, err))
continue
}
defer body.Close()
streamedHash := sha256.New()
tee := io.TeeReader(body, streamedHash)
if err := m.put(ctx, hash, tee); err != nil {
m.log.Warnf("Failed to stream file from upstream %q to mirror: %v.. Trying next url.", url, err)
m.log.Warn(fmt.Sprintf("Failed to stream file from upstream %q to mirror: %v.. Trying next url.", url, err))
continue
}
actualHash := hex.EncodeToString(streamedHash.Sum(nil))
@ -117,7 +117,7 @@ func (m *Maintainer) Mirror(ctx context.Context, hash string, urls []string) err
if err != nil {
return err
}
m.log.Debugf("File uploaded successfully to mirror from %q as %q", url, pubURL)
m.log.Debug(fmt.Sprintf("File uploaded successfully to mirror from %q as %q", url, pubURL))
return nil
}
return fmt.Errorf("failed to download / reupload file with hash %v from any of the urls: %v", hash, urls)
@ -126,19 +126,19 @@ func (m *Maintainer) Mirror(ctx context.Context, hash string, urls []string) err
// Learn downloads a file from one of the existing (non-mirror) urls, hashes it and returns the hash.
func (m *Maintainer) Learn(ctx context.Context, urls []string) (string, error) {
for _, url := range urls {
m.log.Debugf("Learning new hash from %q", url)
m.log.Debug(fmt.Sprintf("Learning new hash from %q", url))
body, err := m.downloadFromUpstream(ctx, url)
if err != nil {
m.log.Debugf("Failed to download file from %q: %v", url, err)
m.log.Debug(fmt.Sprintf("Failed to download file from %q: %v", url, err))
continue
}
defer body.Close()
streamedHash := sha256.New()
if _, err := io.Copy(streamedHash, body); err != nil {
m.log.Debugf("Failed to stream file from %q: %v", url, err)
m.log.Debug(fmt.Sprintf("Failed to stream file from %q: %v", url, err))
}
learnedHash := hex.EncodeToString(streamedHash.Sum(nil))
m.log.Debugf("File successfully downloaded from %q with %q", url, learnedHash)
m.log.Debug(fmt.Sprintf("File successfully downloaded from %q with %q", url, learnedHash))
return learnedHash, nil
}
return "", fmt.Errorf("failed to download file / learn hash from any of the urls: %v", urls)
@ -146,7 +146,7 @@ func (m *Maintainer) Learn(ctx context.Context, urls []string) (string, error) {
// Check checks if a file is present and has the correct hash in the CAS mirror.
func (m *Maintainer) Check(ctx context.Context, expectedHash string) error {
m.log.Debugf("Checking consistency of object with hash %v", expectedHash)
m.log.Debug(fmt.Sprintf("Checking consistency of object with hash %v", expectedHash))
if m.unauthenticated {
return m.checkUnauthenticated(ctx, expectedHash)
}
@ -157,7 +157,7 @@ func (m *Maintainer) Check(ctx context.Context, expectedHash string) error {
// It uses the authenticated CAS s3 endpoint to download the file metadata.
func (m *Maintainer) checkAuthenticated(ctx context.Context, expectedHash string) error {
key := path.Join(keyBase, expectedHash)
m.log.Debugf("Check: s3 getObjectAttributes {Bucket: %v, Key: %v}", m.bucket, key)
m.log.Debug(fmt.Sprintf("Check: s3 getObjectAttributes {Bucket: %v, Key: %v}", m.bucket, key))
attributes, err := m.objectStorageClient.GetObjectAttributes(ctx, &s3.GetObjectAttributesInput{
Bucket: &m.bucket,
Key: &key,
@ -174,7 +174,7 @@ func (m *Maintainer) checkAuthenticated(ctx context.Context, expectedHash string
// checksums are not guaranteed to be present
// and if present, they are only meaningful for single part objects
// fallback if checksum cannot be verified from attributes
m.log.Debugf("S3 object attributes cannot be used to verify key %v. Falling back to download.", key)
m.log.Debug(fmt.Sprintf("S3 object attributes cannot be used to verify key %v. Falling back to download.", key))
return m.checkUnauthenticated(ctx, expectedHash)
}
@ -192,7 +192,7 @@ func (m *Maintainer) checkUnauthenticated(ctx context.Context, expectedHash stri
if err != nil {
return err
}
m.log.Debugf("Check: http get {Url: %v}", pubURL)
m.log.Debug(fmt.Sprintf("Check: http get {Url: %v}", pubURL))
req, err := http.NewRequestWithContext(ctx, http.MethodGet, pubURL, http.NoBody)
if err != nil {
return err
@ -221,10 +221,10 @@ func (m *Maintainer) put(ctx context.Context, hash string, data io.Reader) error
key := path.Join(keyBase, hash)
if m.dryRun {
m.log.Debugf("DryRun: s3 put object {Bucket: %v, Key: %v}", m.bucket, key)
m.log.Debug(fmt.Sprintf("DryRun: s3 put object {Bucket: %v, Key: %v}", m.bucket, key))
return nil
}
m.log.Debugf("Uploading object with hash %v to s3://%v/%v", hash, m.bucket, key)
m.log.Debug(fmt.Sprintf("Uploading object with hash %v to s3://%v/%v", hash, m.bucket, key))
_, err := m.uploadClient.Upload(ctx, &s3.PutObjectInput{
Bucket: &m.bucket,
Key: &key,

View file

@ -9,6 +9,8 @@ package main
import (
"context"
"errors"
"fmt"
"log/slog"
"github.com/bazelbuild/buildtools/build"
"github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/bazelfiles"
@ -17,7 +19,6 @@ import (
"github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/rules"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/spf13/cobra"
"go.uber.org/zap/zapcore"
)
func newUpgradeCmd() *cobra.Command {
@ -38,15 +39,15 @@ func runUpgrade(cmd *cobra.Command, _ []string) error {
if err != nil {
return err
}
log := logger.New(logger.PlainLog, flags.logLevel)
log.Debugf("Parsed flags: %+v", flags)
log := logger.NewTextLogger(flags.logLevel)
log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
fileHelper, err := bazelfiles.New()
if err != nil {
return err
}
log.Debugf("Searching for Bazel files in the current WORKSPACE and all subdirectories...")
log.Debug("Searching for Bazel files in the current WORKSPACE and all subdirectories...")
bazelFiles, err := fileHelper.FindFiles()
if err != nil {
return err
@ -55,10 +56,10 @@ func runUpgrade(cmd *cobra.Command, _ []string) error {
var mirrorUpload mirrorUploader
switch {
case flags.unauthenticated:
log.Warnf("Upgrading rules without authentication for AWS S3. If artifacts are not yet mirrored, this will fail.")
log.Warn("Upgrading rules without authentication for AWS S3. If artifacts are not yet mirrored, this will fail.")
mirrorUpload = mirror.NewUnauthenticated(flags.mirrorBaseURL, flags.dryRun, log)
default:
log.Debugf("Upgrading rules with authentication for AWS S3.")
log.Debug("Upgrading rules with authentication for AWS S3.")
mirrorUpload, err = mirror.New(cmd.Context(), flags.region, flags.bucket, flags.mirrorBaseURL, flags.dryRun, log)
if err != nil {
return err
@ -76,29 +77,29 @@ func runUpgrade(cmd *cobra.Command, _ []string) error {
}
}
if len(issues) > 0 {
log.Warnf("Found %d issues in rules", len(issues))
log.Warn(fmt.Sprintf("Found %d issues in rules", len(issues)))
issues.Report(cmd.OutOrStdout())
return errors.New("found issues in rules")
}
log.Infof("No issues found")
log.Info("No issues found")
return nil
}
func upgradeBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorUpload mirrorUploader, bazelFile bazelfiles.BazelFile, dryRun bool, log *logger.Logger) (iss issues.ByFile, err error) {
func upgradeBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorUpload mirrorUploader, bazelFile bazelfiles.BazelFile, dryRun bool, log *slog.Logger) (iss issues.ByFile, err error) {
iss = issues.NewByFile()
var changed bool // true if any rule in this file was changed
log.Infof("Checking file: %s", bazelFile.RelPath)
log.Info(fmt.Sprintf("Checking file: %s", bazelFile.RelPath))
buildfile, err := fileHelper.LoadFile(bazelFile)
if err != nil {
return iss, err
}
found := rules.Rules(buildfile, rules.SupportedRules)
if len(found) == 0 {
log.Debugf("No rules found in file: %s", bazelFile.RelPath)
log.Debug(fmt.Sprintf("No rules found in file: %s", bazelFile.RelPath))
return iss, nil
}
log.Debugf("Found %d rules in file: %s", len(found), bazelFile.RelPath)
log.Debug(fmt.Sprintf("Found %d rules in file: %s", len(found), bazelFile.RelPath))
for _, rule := range found {
changedRule, ruleIssues := upgradeRule(ctx, mirrorUpload, rule, log)
if len(ruleIssues) > 0 {
@ -108,11 +109,11 @@ func upgradeBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirror
}
if len(iss) > 0 {
log.Warnf("File %s has issues. Not saving!", bazelFile.RelPath)
log.Warn(fmt.Sprintf("File %s has issues. Not saving!", bazelFile.RelPath))
return iss, nil
}
if !changed {
log.Debugf("No changes to file: %s", bazelFile.RelPath)
log.Debug(fmt.Sprintf("No changes to file: %s", bazelFile.RelPath))
return iss, nil
}
if dryRun {
@ -120,10 +121,10 @@ func upgradeBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirror
if err != nil {
return iss, err
}
log.Infof("Dry run: would save updated file %s with diff:\n%s", bazelFile.RelPath, diff)
log.Info(fmt.Sprintf("Dry run: would save updated file %s with diff:\n%s", bazelFile.RelPath, diff))
return iss, nil
}
log.Infof("Saving updated file: %s", bazelFile.RelPath)
log.Info(fmt.Sprintf("Saving updated file: %s", bazelFile.RelPath))
if err := fileHelper.WriteFile(bazelFile, buildfile); err != nil {
return iss, err
}
@ -131,12 +132,12 @@ func upgradeBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirror
return iss, nil
}
func upgradeRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.Rule, log *logger.Logger) (changed bool, iss []error) {
log.Debugf("Upgrading rule: %s", rule.Name())
func upgradeRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.Rule, log *slog.Logger) (changed bool, iss []error) {
log.Debug(fmt.Sprintf("Upgrading rule: %s", rule.Name()))
upstreamURLs, err := rules.UpstreamURLs(rule)
if errors.Is(err, rules.ErrNoUpstreamURL) {
log.Debugf("Rule has no upstream URL. Skipping.")
log.Debug("Rule has no upstream URL. Skipping.")
return false, nil
} else if err != nil {
iss = append(iss, err)
@ -152,7 +153,7 @@ func upgradeRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.R
existingHash, err := rules.GetHash(rule)
if err == nil && learnedHash == existingHash {
log.Debugf("Rule already upgraded. Skipping.")
log.Debug("Rule already upgraded. Skipping.")
return false, nil
}
@ -177,7 +178,7 @@ type upgradeFlags struct {
region string
bucket string
mirrorBaseURL string
logLevel zapcore.Level
logLevel slog.Level
}
func parseUpgradeFlags(cmd *cobra.Command) (upgradeFlags, error) {
@ -193,9 +194,9 @@ func parseUpgradeFlags(cmd *cobra.Command) (upgradeFlags, error) {
if err != nil {
return upgradeFlags{}, err
}
logLevel := zapcore.InfoLevel
logLevel := slog.LevelInfo
if verbose {
logLevel = zapcore.DebugLevel
logLevel = slog.LevelDebug
}
region, err := cmd.Flags().GetString("region")
if err != nil {

View file

@ -10,7 +10,6 @@ go_library(
"//internal/constants",
"//internal/logger",
"//internal/versions",
"@org_uber_go_zap//zapcore",
],
)

View file

@ -10,12 +10,14 @@ package main
import (
"context"
"flag"
"fmt"
"log/slog"
"os"
"github.com/edgelesssys/constellation/v2/internal/api/versionsapi"
"github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/edgelesssys/constellation/v2/internal/versions"
"go.uber.org/zap/zapcore"
)
var (
@ -25,18 +27,21 @@ var (
)
func main() {
log := logger.New(logger.PlainLog, zapcore.DebugLevel)
log := logger.NewTextLogger(slog.LevelDebug)
ctx := context.Background()
flag.Parse()
if *refFlag == "" {
log.Fatalf("ref must be set")
log.Error("ref must be set")
os.Exit(1)
}
if *streamFlag == "" {
log.Fatalf("stream must be set")
log.Error("stream must be set")
os.Exit(1)
}
if *versionFlag == "" {
log.Fatalf("version must be set")
log.Error("version must be set")
os.Exit(1)
}
cliInfo := versionsapi.CLIInfo{
@ -52,15 +57,18 @@ func main() {
c, cclose, err := versionsapi.NewClient(ctx, "eu-central-1", "cdn-constellation-backend", constants.CDNDefaultDistributionID, false, log)
if err != nil {
log.Fatalf("creating s3 client: %w", err)
log.Error(fmt.Sprintf("creating s3 client: %s", err))
os.Exit(1)
}
defer func() {
if err := cclose(ctx); err != nil {
log.Fatalf("invalidating cache: %w", err)
log.Error(fmt.Sprintf("invalidating cache: %s", err))
os.Exit(1)
}
}()
if err := c.UpdateCLIInfo(ctx, cliInfo); err != nil {
log.Fatalf("updating cli info: %w", err)
log.Error(fmt.Sprintf("updating cli info: %s", err))
os.Exit(1)
}
}

View file

@ -16,7 +16,6 @@ go_library(
"//hack/oci-pin/internal/sums",
"//internal/logger",
"@com_github_spf13_cobra//:cobra",
"@org_uber_go_zap//zapcore",
],
)

View file

@ -8,6 +8,7 @@ package main
import (
"fmt"
"io"
"log/slog"
"os"
"path/filepath"
"strings"
@ -16,7 +17,6 @@ import (
"github.com/edgelesssys/constellation/v2/hack/oci-pin/internal/inject"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/spf13/cobra"
"go.uber.org/zap/zapcore"
)
func newCodegenCmd() *cobra.Command {
@ -44,15 +44,15 @@ func runCodegen(cmd *cobra.Command, _ []string) error {
if err != nil {
return err
}
log := logger.New(logger.PlainLog, flags.logLevel)
log.Debugf("Parsed flags: %+v", flags)
log := logger.NewTextLogger(flags.logLevel)
log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
registry, prefix, name, tag, err := splitRepoTag(flags.imageRepoTag)
if err != nil {
return fmt.Errorf("splitting OCI image reference %q: %w", flags.imageRepoTag, err)
}
log.Debugf("Generating Go code for OCI image %s.", name)
log.Debug(fmt.Sprintf("Generating Go code for OCI image %s.", name))
ociIndexPath := filepath.Join(flags.ociPath, "index.json")
index, err := os.Open(ociIndexPath)
@ -78,7 +78,7 @@ func runCodegen(cmd *cobra.Command, _ []string) error {
return err
}
log.Debugf("OCI image digest: %s", digest)
log.Debug(fmt.Sprintf("OCI image digest: %s", digest))
if err := inject.Render(out, inject.PinningValues{
Package: flags.pkg,
@ -92,7 +92,7 @@ func runCodegen(cmd *cobra.Command, _ []string) error {
return fmt.Errorf("rendering Go code: %w", err)
}
log.Debugf("Go code created at %q 🤖", flags.output)
log.Debug(fmt.Sprintf("Go code created at %q 🤖", flags.output))
return nil
}
@ -102,7 +102,7 @@ type codegenFlags struct {
pkg string
identifier string
imageRepoTag string
logLevel zapcore.Level
logLevel slog.Level
}
func parseCodegenFlags(cmd *cobra.Command) (codegenFlags, error) {
@ -137,9 +137,9 @@ func parseCodegenFlags(cmd *cobra.Command) (codegenFlags, error) {
if err != nil {
return codegenFlags{}, err
}
logLevel := zapcore.InfoLevel
logLevel := slog.LevelInfo
if verbose {
logLevel = zapcore.DebugLevel
logLevel = slog.LevelDebug
}
return codegenFlags{

View file

@ -8,12 +8,12 @@ package main
import (
"fmt"
"io"
"log/slog"
"os"
"github.com/edgelesssys/constellation/v2/hack/oci-pin/internal/sums"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/spf13/cobra"
"go.uber.org/zap/zapcore"
)
func newMergeCmd() *cobra.Command {
@ -35,10 +35,10 @@ func runMerge(cmd *cobra.Command, _ []string) error {
if err != nil {
return err
}
log := logger.New(logger.PlainLog, flags.logLevel)
log.Debugf("Parsed flags: %+v", flags)
log := logger.NewTextLogger(flags.logLevel)
log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
log.Debugf("Merging sum file from %q into %q.", flags.inputs, flags.output)
log.Debug(fmt.Sprintf("Merging sum file from %q into %q.", flags.inputs, flags.output))
var out io.Writer
if flags.output == "-" {
@ -61,7 +61,7 @@ func runMerge(cmd *cobra.Command, _ []string) error {
return fmt.Errorf("creating merged sum file: %w", err)
}
log.Debugf("Sum file created at %q 🤖", flags.output)
log.Debug(fmt.Sprintf("Sum file created at %q 🤖", flags.output))
return nil
}
@ -93,7 +93,7 @@ func parseInput(input string) ([]sums.PinnedImageReference, error) {
type mergeFlags struct {
inputs []string
output string
logLevel zapcore.Level
logLevel slog.Level
}
func parseMergeFlags(cmd *cobra.Command) (mergeFlags, error) {
@ -109,9 +109,9 @@ func parseMergeFlags(cmd *cobra.Command) (mergeFlags, error) {
if err != nil {
return mergeFlags{}, err
}
logLevel := zapcore.InfoLevel
logLevel := slog.LevelInfo
if verbose {
logLevel = zapcore.DebugLevel
logLevel = slog.LevelDebug
}
return mergeFlags{

View file

@ -8,6 +8,7 @@ package main
import (
"fmt"
"io"
"log/slog"
"os"
"path/filepath"
"strings"
@ -16,7 +17,6 @@ import (
"github.com/edgelesssys/constellation/v2/hack/oci-pin/internal/sums"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/spf13/cobra"
"go.uber.org/zap/zapcore"
)
func newSumCmd() *cobra.Command {
@ -41,15 +41,15 @@ func runSum(cmd *cobra.Command, _ []string) error {
if err != nil {
return err
}
log := logger.New(logger.PlainLog, flags.logLevel)
log.Debugf("Parsed flags: %+v", flags)
log := logger.NewTextLogger(flags.logLevel)
log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
registry, prefix, name, tag, err := splitRepoTag(flags.imageRepoTag)
if err != nil {
return fmt.Errorf("splitting repo tag: %w", err)
}
log.Debugf("Generating sum file for OCI image %s.", name)
log.Debug(fmt.Sprintf("Generating sum file for OCI image %s.", name))
ociIndexPath := filepath.Join(flags.ociPath, "index.json")
index, err := os.Open(ociIndexPath)
@ -75,7 +75,7 @@ func runSum(cmd *cobra.Command, _ []string) error {
return fmt.Errorf("extracting OCI image digest: %w", err)
}
log.Debugf("OCI image digest: %s", digest)
log.Debug(fmt.Sprintf("OCI image digest: %s", digest))
refs := []sums.PinnedImageReference{
{
@ -91,7 +91,7 @@ func runSum(cmd *cobra.Command, _ []string) error {
return fmt.Errorf("creating sum file: %w", err)
}
log.Debugf("Sum file created at %q 🤖", flags.output)
log.Debug(fmt.Sprintf("Sum file created at %q 🤖", flags.output))
return nil
}
@ -99,7 +99,7 @@ type sumFlags struct {
ociPath string
output string
imageRepoTag string
logLevel zapcore.Level
logLevel slog.Level
}
func parseSumFlags(cmd *cobra.Command) (sumFlags, error) {
@ -126,9 +126,9 @@ func parseSumFlags(cmd *cobra.Command) (sumFlags, error) {
if err != nil {
return sumFlags{}, err
}
logLevel := zapcore.InfoLevel
logLevel := slog.LevelInfo
if verbose {
logLevel = zapcore.DebugLevel
logLevel = slog.LevelDebug
}
return sumFlags{

View file

@ -17,8 +17,6 @@ go_library(
"//hack/qemu-metadata-api/virtwrapper",
"//internal/logger",
"@org_libvirt_go_libvirt//:libvirt",
"@org_uber_go_zap//:zap",
"@org_uber_go_zap//zapcore",
],
)

View file

@ -10,12 +10,12 @@ package main
import (
"flag"
"log/slog"
"os"
"github.com/edgelesssys/constellation/v2/hack/qemu-metadata-api/server"
"github.com/edgelesssys/constellation/v2/hack/qemu-metadata-api/virtwrapper"
"github.com/edgelesssys/constellation/v2/internal/logger"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"libvirt.org/go/libvirt"
)
@ -26,16 +26,18 @@ func main() {
initSecretHash := flag.String("initsecrethash", "", "brcypt hash of the init secret")
flag.Parse()
log := logger.New(logger.JSONLog, zapcore.InfoLevel)
log := logger.NewJSONLogger(slog.LevelInfo)
conn, err := libvirt.NewConnect(*libvirtURI)
if err != nil {
log.With(zap.Error(err)).Fatalf("Failed to connect to libvirt")
log.With(slog.Any("error", err)).Error("Failed to connect to libvirt")
os.Exit(1)
}
defer conn.Close()
serv := server.New(log, *targetNetwork, *initSecretHash, &virtwrapper.Connect{Conn: conn})
if err := serv.ListenAndServe(*bindPort); err != nil {
log.With(zap.Error(err)).Fatalf("Failed to serve")
log.With(slog.Any("error", err)).Error("Failed to serve")
os.Exit(1)
}
}

View file

@ -12,9 +12,7 @@ go_library(
deps = [
"//hack/qemu-metadata-api/virtwrapper",
"//internal/cloud/metadata",
"//internal/logger",
"//internal/role",
"@org_uber_go_zap//:zap",
],
)

View file

@ -9,27 +9,26 @@ package server
import (
"encoding/json"
"fmt"
"log/slog"
"net"
"net/http"
"strings"
"github.com/edgelesssys/constellation/v2/hack/qemu-metadata-api/virtwrapper"
"github.com/edgelesssys/constellation/v2/internal/cloud/metadata"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/edgelesssys/constellation/v2/internal/role"
"go.uber.org/zap"
)
// Server that provides QEMU metadata.
type Server struct {
log *logger.Logger
log *slog.Logger
virt virConnect
network string
initSecretHashVal []byte
}
// New creates a new Server.
func New(log *logger.Logger, network, initSecretHash string, conn virConnect) *Server {
func New(log *slog.Logger, network, initSecretHash string, conn virConnect) *Server {
return &Server{
log: log,
virt: conn,
@ -55,25 +54,25 @@ func (s *Server) ListenAndServe(port string) error {
return err
}
s.log.Infof("Starting QEMU metadata API on %s", lis.Addr())
s.log.Info(fmt.Sprintf("Starting QEMU metadata API on %s", lis.Addr()))
return server.Serve(lis)
}
// listSelf returns peer information about the instance issuing the request.
func (s *Server) listSelf(w http.ResponseWriter, r *http.Request) {
log := s.log.With(zap.String("peer", r.RemoteAddr))
log.Infof("Serving GET request for /self")
log := s.log.With(slog.String("peer", r.RemoteAddr))
log.Info("Serving GET request for /self")
remoteIP, _, err := net.SplitHostPort(r.RemoteAddr)
if err != nil {
log.With(zap.Error(err)).Errorf("Failed to parse remote address")
log.With(slog.Any("error", err)).Error("Failed to parse remote address")
http.Error(w, fmt.Sprintf("Failed to parse remote address: %s\n", err), http.StatusInternalServerError)
return
}
peers, err := s.listAll()
if err != nil {
log.With(zap.Error(err)).Errorf("Failed to list peer metadata")
log.With(slog.Any("error", err)).Error("Failed to list peer metadata")
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
@ -85,23 +84,23 @@ func (s *Server) listSelf(w http.ResponseWriter, r *http.Request) {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
log.Infof("Request successful")
log.Info("Request successful")
return
}
}
log.Errorf("Failed to find peer in active leases")
log.Error("Failed to find peer in active leases")
http.Error(w, "No matching peer found", http.StatusNotFound)
}
// listPeers returns a list of all active peers.
func (s *Server) listPeers(w http.ResponseWriter, r *http.Request) {
log := s.log.With(zap.String("peer", r.RemoteAddr))
log.Infof("Serving GET request for /peers")
log := s.log.With(slog.String("peer", r.RemoteAddr))
log.Info("Serving GET request for /peers")
peers, err := s.listAll()
if err != nil {
log.With(zap.Error(err)).Errorf("Failed to list peer metadata")
log.With(slog.Any("error", err)).Error("Failed to list peer metadata")
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
@ -111,38 +110,38 @@ func (s *Server) listPeers(w http.ResponseWriter, r *http.Request) {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
log.Infof("Request successful")
log.Info("Request successful")
}
// initSecretHash returns the hash of the init secret.
func (s *Server) initSecretHash(w http.ResponseWriter, r *http.Request) {
log := s.log.With(zap.String("initSecretHash", r.RemoteAddr))
log := s.log.With(slog.String("initSecretHash", r.RemoteAddr))
if r.Method != http.MethodGet {
log.With(zap.String("method", r.Method)).Errorf("Invalid method for /initSecretHash")
log.With(slog.String("method", r.Method)).Error("Invalid method for /initSecretHash")
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
return
}
log.Infof("Serving GET request for /initsecrethash")
log.Info("Serving GET request for /initsecrethash")
w.Header().Set("Content-Type", "text/plain")
_, err := w.Write(s.initSecretHashVal)
if err != nil {
log.With(zap.Error(err)).Errorf("Failed to write init secret hash")
log.With(slog.Any("error", err)).Error("Failed to write init secret hash")
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
log.Infof("Request successful")
log.Info("Request successful")
}
// getEndpoint returns the IP address of the first control-plane instance.
// This allows us to fake a load balancer for QEMU instances.
func (s *Server) getEndpoint(w http.ResponseWriter, r *http.Request) {
log := s.log.With(zap.String("peer", r.RemoteAddr))
log.Infof("Serving GET request for /endpoint")
log := s.log.With(slog.String("peer", r.RemoteAddr))
log.Info("Serving GET request for /endpoint")
net, err := s.virt.LookupNetworkByName(s.network)
if err != nil {
log.With(zap.Error(err)).Errorf("Failed to lookup network")
log.With(slog.Any("error", err)).Error("Failed to lookup network")
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
@ -150,7 +149,7 @@ func (s *Server) getEndpoint(w http.ResponseWriter, r *http.Request) {
leases, err := net.GetDHCPLeases()
if err != nil {
log.With(zap.Error(err)).Errorf("Failed to get DHCP leases")
log.With(slog.Any("error", err)).Error("Failed to get DHCP leases")
http.Error(w, err.Error(), http.StatusInternalServerError)
}
@ -162,12 +161,12 @@ func (s *Server) getEndpoint(w http.ResponseWriter, r *http.Request) {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
log.Infof("Request successful")
log.Info("Request successful")
return
}
}
log.Errorf("Failed to find control-plane peer in active leases")
log.Error("Failed to find control-plane peer in active leases")
http.Error(w, "No matching peer found", http.StatusNotFound)
}