bazel-deps-mirror: upgrade command (#1617)

* bazel-deps-mirror: upgrade command

This command can be used to upgrade a dependency.
Users are expected to replace the upstream URLs with those of the new version and then run the upgrade command.
It recalculates the expected hash and uploads the new dependency to the mirror.
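
A minimal sketch of the intended workflow, assuming the upstream URLs of the affected rule have already been edited to point at the new release; the //bazel/ci targets are the ones wired up in this change, and running them locally via bazelisk is an assumption:

bazelisk run //bazel/ci:deps_mirror_upgrade  # recalculate the expected sha256 and upload the new artifact to the mirror
bazelisk run //bazel/ci:deps_mirror_upload   # upload any remaining unmirrored dependencies and normalize the rules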
Malte Poll 2023-04-05 17:32:51 +02:00 committed by GitHub
parent 69de06dd1f
commit 0ece41c146
13 changed files with 476 additions and 29 deletions


@ -50,7 +50,9 @@ jobs:
- name: Upload Bazel dependencies to the mirror
if: startsWith(github.head_ref, 'renovate/')
shell: bash
run: bazelisk run //bazel/ci:deps_mirror_upload
run: |
bazelisk run //bazel/ci:deps_mirror_upgrade
bazelisk run //bazel/ci:deps_mirror_upload
- name: Run Bazel tidy
shell: bash


@ -17,6 +17,7 @@
/e2e @katexochen
/hack/azure-jump-host @malt3
/hack/azure-snp-report-verify @derpsteb
/hack/bazel-deps-mirror @malt3
/hack/check-licenses.sh @thomasten
/hack/clidocgen @thomasten
/hack/fetch-broken-e2e @katexochen


@ -332,6 +332,10 @@ sh_template(
template = "go_generate.sh.in",
)
# deps_mirror_fix fixes bazel workspace rules for external dependencies.
# It normalizes the rules and rewrites WORKSPACE and bzl files.
# If files are not in the mirror, it will fail.
# Use deps_mirror_upload to upload missing files.
repo_command(
name = "deps_mirror_fix",
args = [
@ -341,6 +345,8 @@ repo_command(
command = "//hack/bazel-deps-mirror",
)
# deps_mirror_upload fixes bazel workspace rules for external dependencies.
# It uploads all dependencies to the mirror, normalizes the rules and rewrites WORKSPACE and bzl files.
repo_command(
name = "deps_mirror_upload",
args = [
@ -349,6 +355,21 @@ repo_command(
command = "//hack/bazel-deps-mirror",
)
# deps_mirror_upgrade upgrades bazel workspace rules for external dependencies.
# Users are expected to replace the upstream URLs first.
# It then recalculates the expected hash and uploads the new dependency to the mirror.
repo_command(
name = "deps_mirror_upgrade",
args = [
"upgrade",
],
command = "//hack/bazel-deps-mirror",
)
# deps_mirror_check checks bazel workspace rules for external dependencies.
# It checks if all dependency rules have mirror urls and are properly formatted.
# It doesn't check if the mirror has the files.
# Use deps_mirror_check_mirror to check if the mirror has the files.
repo_command(
name = "deps_mirror_check",
args = [
@ -357,6 +378,8 @@ repo_command(
command = "//hack/bazel-deps-mirror",
)
# deps_mirror_check_mirror checks bazel workspace rules for external dependencies.
# It checks if all dependency rules are correctly mirrored and checks that the rules are properly formatted.
repo_command(
name = "deps_mirror_check_mirror",
args = [

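For reference, a hedged sketch of how the other mirror targets above might be invoked; only the upgrade and upload invocations appear verbatim in the CI workflow, the rest follow the same pattern and are an assumption:

bazelisk run //bazel/ci:deps_mirror_check         # check that rules are normalized and carry mirror urls
bazelisk run //bazel/ci:deps_mirror_check_mirror  # additionally check that the mirror serves the referenced files
bazelisk run //bazel/ci:deps_mirror_fix           # normalize rules; fails if files are missing from the mirror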

@ -6,6 +6,7 @@ go_library(
"bazel-deps-mirror.go",
"check.go",
"fix.go",
"upgrade.go",
],
importpath = "github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror",
visibility = ["//visibility:private"],


@ -46,6 +46,7 @@ func newRootCmd() *cobra.Command {
rootCmd.AddCommand(newCheckCmd())
rootCmd.AddCommand(newFixCmd())
rootCmd.AddCommand(newUpgradeCmd())
return rootCmd
}


@ -97,7 +97,7 @@ func checkBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorCh
found := rules.Rules(buildfile, rules.SupportedRules)
if len(found) == 0 {
log.Debugf("No rules found in file: %s", bazelFile.RelPath)
return
return issByFile, nil
}
log.Debugf("Found %d rules in file: %s", len(found), bazelFile.RelPath)
for _, rule := range found {
@ -121,7 +121,7 @@ func checkBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorCh
}
}
}
return
return issByFile, nil
}
type checkFlags struct {


@ -131,41 +131,72 @@ func fixBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorUplo
return iss, nil
}
func learnHashForRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.Rule, log *logger.Logger) error {
upstreamURLs, err := rules.UpstreamURLs(rule)
if err != nil {
return err
}
learnedHash, learnErr := mirrorUpload.Learn(ctx, upstreamURLs)
if learnErr != nil {
return learnErr
}
rules.SetHash(rule, learnedHash)
log.Debugf("Learned hash for rule %s: %s", rule.Name(), learnedHash)
return nil
}
func fixRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.Rule, log *logger.Logger) (changed bool, iss []error) {
log.Debugf("Fixing rule: %s", rule.Name())
// try to learn the hash
if hash, err := rules.GetHash(rule); err != nil || hash == "" {
if err := learnHashForRule(ctx, mirrorUpload, rule, log); err != nil {
// don't try to fix the rule if the hash is missing and we can't learn it
iss = append(iss,
errors.New("hash attribute is missing and can't learn it from upstream."+
"Unable to check if the artifact is already mirrored or upload it"))
return false, iss
}
changed = true
}
// check if the rule is a valid pinned dependency rule (has all required attributes)
issue := rules.ValidatePinned(rule)
if issue != nil {
// don't try to fix the rule if it's invalid
iss = append(iss, issue...)
return
return false, iss
}
// check if the referenced CAS object exists in the mirror and is consistent
expectedHash, expectedHashErr := rules.GetHash(rule)
if expectedHashErr != nil {
// don't try to fix the rule if the hash is missing
iss = append(iss,
errors.New("hash attribute is missing. unable to check if the artifact is already mirrored or upload it"))
return
iss = append(iss, expectedHashErr)
return false, iss
}
if rules.HasMirrorURL(rule) {
changed = rules.Normalize(rule)
return
changed = rules.Normalize(rule) || changed
return changed, iss
}
if checkErr := mirrorUpload.Check(ctx, expectedHash); checkErr != nil {
log.Infof("Artifact %s with hash %s is not yet mirrored. Uploading...", rule.Name(), expectedHash)
if uploadErr := mirrorUpload.Mirror(ctx, expectedHash, rules.GetURLs(rule)); uploadErr != nil {
// don't try to fix the rule if the upload failed
iss = append(iss, uploadErr)
return
return changed, iss
}
} else {
log.Infof("Artifact %s with hash %s was already uploaded before. Adding to rule...", rule.Name(), expectedHash)
}
// now the artifact is mirrored (if it wasn't already) and we can fix the rule
mirrorURL, err := mirrorUpload.MirrorURL(expectedHash)
if err != nil {
iss = append(iss, err)
return
return changed, iss
}
rules.AddURLs(rule, []string{mirrorURL})
@ -224,6 +255,7 @@ func parseFixFlags(cmd *cobra.Command) (fixFlags, error) {
}
type mirrorUploader interface {
Learn(ctx context.Context, urls []string) (string, error)
Check(ctx context.Context, expectedHash string) error
Mirror(ctx context.Context, hash string, urls []string) error
MirrorURL(hash string) (string, error)


@ -25,6 +25,7 @@ go_test(
"@com_github_aws_aws_sdk_go_v2_service_s3//:s3",
"@com_github_aws_aws_sdk_go_v2_service_s3//types",
"@com_github_stretchr_testify//assert",
"@com_github_stretchr_testify//require",
"@org_uber_go_goleak//:goleak",
],
)


@ -123,6 +123,27 @@ func (m *Maintainer) Mirror(ctx context.Context, hash string, urls []string) err
return fmt.Errorf("failed to download / reupload file with hash %v from any of the urls: %v", hash, urls)
}
// Learn downloads a file from one of the existing (non-mirror) urls, hashes it and returns the hash.
func (m *Maintainer) Learn(ctx context.Context, urls []string) (string, error) {
for _, url := range urls {
m.log.Debugf("Learning new hash from %q", url)
body, err := m.downloadFromUpstream(ctx, url)
if err != nil {
m.log.Debugf("Failed to download file from %q: %v", url, err)
continue
}
defer body.Close()
streamedHash := sha256.New()
if _, err := io.Copy(streamedHash, body); err != nil {
m.log.Debugf("Failed to stream file from %q: %v", url, err)
continue
}
learnedHash := hex.EncodeToString(streamedHash.Sum(nil))
m.log.Debugf("File successfully downloaded from %q with %q", url, learnedHash)
return learnedHash, nil
}
return "", fmt.Errorf("failed to download file / learn hash from any of the urls: %v", urls)
}
// Check checks if a file is present and has the correct hash in the CAS mirror.
func (m *Maintainer) Check(ctx context.Context, expectedHash string) error {
m.log.Debugf("Checking consistency of object with hash %v", expectedHash)


@ -19,6 +19,7 @@ import (
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/goleak"
)
@ -146,6 +147,51 @@ func TestMirror(t *testing.T) {
}
}
func TestLearn(t *testing.T) {
testCases := map[string]struct {
wantHash string
upstreamResponse []byte
upstreamStatusCode int
wantErr bool
}{
"http error": {
upstreamResponse: []byte("foo"), // ignored
upstreamStatusCode: http.StatusNotFound,
wantErr: true,
},
"success": {
wantHash: "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae",
upstreamResponse: []byte("foo"),
upstreamStatusCode: http.StatusOK,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
m := Maintainer{
unauthenticated: true,
httpClient: &http.Client{
Transport: &stubUpstream{
statusCode: tc.upstreamStatusCode,
body: tc.upstreamResponse,
},
},
log: logger.NewTest(t),
}
gotHash, err := m.Learn(context.Background(), []string{"https://example.com/foo"})
if tc.wantErr {
assert.Error(err)
return
}
require.NoError(err)
assert.Equal(tc.wantHash, gotHash)
})
}
}
func TestCheck(t *testing.T) {
testCases := map[string]struct {
hash string


@ -34,7 +34,7 @@ ruleLoop:
}
}
}
return
return rules
}
// ValidatePinned checks if the given rule is a pinned dependency rule.
@ -78,7 +78,7 @@ func ValidatePinned(rule *build.Rule) (validationErrs []error) {
validationErrs = append(validationErrs, errors.New("rule has empty sha256 attribute"))
}
}
return
return validationErrs
}
// Check checks if a dependency rule is normalized and contains a mirror url.
@ -107,7 +107,7 @@ func Check(rule *build.Rule) (validationErrs []error) {
if rule.Kind() == "rpm" && len(urls) != 1 {
validationErrs = append(validationErrs, errors.New("rpm rule has unstable urls that are not the edgeless mirror"))
}
return
return validationErrs
}
// Normalize normalizes a rule and returns true if the rule was changed.
@ -122,11 +122,10 @@ func Normalize(rule *build.Rule) (changed bool) {
sortURLs(normalizedURLS)
normalizedURLS = deduplicateURLs(normalizedURLS)
if slices.Equal(urls, normalizedURLS) && rule.Attr("url") == nil {
return
return changed
}
setURLs(rule, normalizedURLS)
changed = true
return
return true
}
// AddURLs adds a url to a rule.
@ -147,6 +146,11 @@ func GetHash(rule *build.Rule) (string, error) {
return hash, nil
}
// SetHash sets the sha256 hash of a rule.
func SetHash(rule *build.Rule, hash string) {
rule.SetAttr("sha256", &build.StringExpr{Value: hash})
}
// GetURLs returns the sorted urls of a rule.
func GetURLs(rule *build.Rule) []string {
urls := rule.AttrStrings("urls")
@ -157,12 +161,42 @@ func GetURLs(rule *build.Rule) []string {
return urls
}
// HasMirrorURL returns true if the rule has a url from the Edgeless mirror.
// HasMirrorURL returns true if the rule has a url from the Edgeless mirror
// with the correct hash.
func HasMirrorURL(rule *build.Rule) bool {
_, err := mirrorURL(rule)
return err == nil
}
// PrepareUpgrade prepares a rule for an upgrade
// by removing all urls that are not upstream urls
// and removing the hash attribute.
// It returns true if the rule was changed.
func PrepareUpgrade(rule *build.Rule) (changed bool, err error) {
upstreamURLs, err := UpstreamURLs(rule)
if err != nil {
return false, err
}
setURLs(rule, upstreamURLs)
rule.DelAttr("sha256")
return true, nil
}
// UpstreamURLs returns the upstream urls (non-mirror urls) of a rule.
func UpstreamURLs(rule *build.Rule) (urls []string, err error) {
urls = GetURLs(rule)
var upstreamURLs []string
for _, url := range urls {
if isUpstreamURL(url) {
upstreamURLs = append(upstreamURLs, url)
}
}
if len(upstreamURLs) == 0 {
return nil, ErrNoUpstreamURL
}
return upstreamURLs, nil
}
func deduplicateURLs(urls []string) (deduplicated []string) {
seen := make(map[string]bool)
for _, url := range urls {
@ -171,7 +205,7 @@ func deduplicateURLs(urls []string) (deduplicated []string) {
seen[url] = true
}
}
return
return deduplicated
}
// addTypeAttribute adds the type attribute to http_archive rules if it is missing.
@ -243,9 +277,13 @@ urlLoop:
// mirrorURL returns the first mirror URL for a rule.
func mirrorURL(rule *build.Rule) (string, error) {
hash, err := GetHash(rule)
if err != nil {
return "", err
}
urls := GetURLs(rule)
for _, url := range urls {
if strings.HasPrefix(url, edgelessMirrorPrefix) {
if strings.HasPrefix(url, edgelessMirrorPrefix) && strings.HasSuffix(url, hash) {
return url, nil
}
}
@ -284,12 +322,21 @@ func sortURLs(urls []string) {
})
}
// SupportedRules is a list of all rules that can be mirrored.
var SupportedRules = []string{
func isUpstreamURL(url string) bool {
return !strings.HasPrefix(url, bazelMirrorPrefix) && !strings.HasPrefix(url, edgelessMirrorPrefix)
}
var (
// SupportedRules is a list of all rules that can be mirrored.
SupportedRules = []string{
"http_archive",
"http_file",
"rpm",
}
}
// ErrNoUpstreamURL is returned when a rule has no upstream URL.
ErrNoUpstreamURL = errors.New("rule has no upstream URL")
)
const (
bazelMirrorPrefix = "https://mirror.bazel.build/"


@ -448,3 +448,54 @@ http_archive(
_, err = GetHash(rules[1])
assert.Error(err)
}
func TestPrepareUpgrade(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
rule := `
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
http_archive(
name = "foo_archive",
sha256 = "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae",
urls = [
"https://cdn.confidential.cloud/constellation/cas/sha256/2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae",
"https://mirror.bazel.build/example.com/foo.tar.gz",
"https://example.com/foo.tar.gz",
],
type = "tar.gz",
)
http_archive(
name = "bar_archive",
sha256 = "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae",
urls = [
"https://cdn.confidential.cloud/constellation/cas/sha256/2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae",
"https://mirror.bazel.build/example.com/foo.tar.gz",
],
type = "tar.gz",
)
`
bf, err := build.Parse("foo.bzl", []byte(rule))
if err != nil {
t.Fatal(err)
}
rules := Rules(bf, SupportedRules)
require.Len(rules, 2)
changed, err := PrepareUpgrade(rules[0])
assert.NoError(err)
assert.True(changed)
urls := GetURLs(rules[0])
assert.Equal(1, len(urls))
assert.Equal("https://example.com/foo.tar.gz", urls[0])
hash, err := GetHash(rules[0])
assert.Empty(hash)
assert.Error(err)
changed, err = PrepareUpgrade(rules[1])
assert.ErrorIs(err, ErrNoUpstreamURL)
assert.False(changed)
}


@ -0,0 +1,221 @@
/*
Copyright (c) Edgeless Systems GmbH
SPDX-License-Identifier: AGPL-3.0-only
*/
package main
import (
"context"
"errors"
"github.com/bazelbuild/buildtools/build"
"github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/bazelfiles"
"github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/issues"
"github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/mirror"
"github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/rules"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/spf13/cobra"
"go.uber.org/zap/zapcore"
)
func newUpgradeCmd() *cobra.Command {
cmd := &cobra.Command{
Use: "upgrade",
Short: "upgrade all Bazel dependency rules by recalculating expected hashes, uploading artifacts to the mirror (if needed) and formatting the rules.",
RunE: runUpgrade,
}
cmd.Flags().Bool("unauthenticated", false, "Doesn't require authentication to the mirror but cannot upload files.")
cmd.Flags().Bool("dry-run", false, "Don't actually change files or upload anything.")
return cmd
}
func runUpgrade(cmd *cobra.Command, _ []string) error {
flags, err := parseUpgradeFlags(cmd)
if err != nil {
return err
}
log := logger.New(logger.PlainLog, flags.logLevel)
log.Debugf("Parsed flags: %+v", flags)
fileHelper, err := bazelfiles.New()
if err != nil {
return err
}
log.Debugf("Searching for Bazel files in the current WORKSPACE and all subdirectories...")
bazelFiles, err := fileHelper.FindFiles()
if err != nil {
return err
}
var mirrorUpload mirrorUploader
switch {
case flags.unauthenticated:
log.Warnf("Upgrading rules without authentication for AWS S3. If artifacts are not yet mirrored, this will fail.")
mirrorUpload = mirror.NewUnauthenticated(flags.mirrorBaseURL, flags.dryRun, log)
default:
log.Debugf("Upgrading rules with authentication for AWS S3.")
mirrorUpload, err = mirror.New(cmd.Context(), flags.region, flags.bucket, flags.mirrorBaseURL, flags.dryRun, log)
if err != nil {
return err
}
}
issues := issues.New()
for _, bazelFile := range bazelFiles {
fileIssues, err := upgradeBazelFile(cmd.Context(), fileHelper, mirrorUpload, bazelFile, flags.dryRun, log)
if err != nil {
return err
}
if len(fileIssues) > 0 {
issues.Set(bazelFile.AbsPath, fileIssues)
}
}
if len(issues) > 0 {
log.Warnf("Found %d issues in rules", len(issues))
issues.Report(cmd.OutOrStdout())
return errors.New("found issues in rules")
}
log.Infof("No issues found")
return nil
}
func upgradeBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorUpload mirrorUploader, bazelFile bazelfiles.BazelFile, dryRun bool, log *logger.Logger) (iss issues.ByFile, err error) {
iss = issues.NewByFile()
var changed bool // true if any rule in this file was changed
log.Infof("Checking file: %s", bazelFile.RelPath)
buildfile, err := fileHelper.LoadFile(bazelFile)
if err != nil {
return iss, err
}
found := rules.Rules(buildfile, rules.SupportedRules)
if len(found) == 0 {
log.Debugf("No rules found in file: %s", bazelFile.RelPath)
return iss, nil
}
log.Debugf("Found %d rules in file: %s", len(found), bazelFile.RelPath)
for _, rule := range found {
changedRule, ruleIssues := upgradeRule(ctx, mirrorUpload, rule, log)
if len(ruleIssues) > 0 {
iss.Add(rule.Name(), ruleIssues...)
}
changed = changed || changedRule
}
if len(iss) > 0 {
log.Warnf("File %s has issues. Not saving!", bazelFile.RelPath)
return iss, nil
}
if !changed {
log.Debugf("No changes to file: %s", bazelFile.RelPath)
return iss, nil
}
if dryRun {
diff, err := fileHelper.Diff(bazelFile, buildfile)
if err != nil {
return iss, err
}
log.Infof("Dry run: would save updated file %s with diff:\n%s", bazelFile.RelPath, diff)
return iss, nil
}
log.Infof("Saving updated file: %s", bazelFile.RelPath)
if err := fileHelper.WriteFile(bazelFile, buildfile); err != nil {
return iss, err
}
return iss, nil
}
func upgradeRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.Rule, log *logger.Logger) (changed bool, iss []error) {
log.Debugf("Upgrading rule: %s", rule.Name())
upstreamURLs, err := rules.UpstreamURLs(rule)
if errors.Is(err, rules.ErrNoUpstreamURL) {
log.Debugf("Rule has no upstream URL. Skipping.")
return false, nil
} else if err != nil {
iss = append(iss, err)
return false, iss
}
// learn the hash of the upstream artifact
learnedHash, learnErr := mirrorUpload.Learn(ctx, upstreamURLs)
if learnErr != nil {
iss = append(iss, learnErr)
return false, iss
}
existingHash, err := rules.GetHash(rule)
if err == nil && learnedHash == existingHash {
log.Debugf("Rule already upgraded. Skipping.")
return false, nil
}
changed, err = rules.PrepareUpgrade(rule)
if err != nil {
iss = append(iss, err)
return changed, iss
}
rules.SetHash(rule, learnedHash)
changed = true
if _, fixErr := fixRule(ctx, mirrorUpload, rule, log); fixErr != nil {
iss = append(iss, fixErr...)
return changed, iss
}
return changed, iss
}
type upgradeFlags struct {
unauthenticated bool
dryRun bool
region string
bucket string
mirrorBaseURL string
logLevel zapcore.Level
}
func parseUpgradeFlags(cmd *cobra.Command) (upgradeFlags, error) {
unauthenticated, err := cmd.Flags().GetBool("unauthenticated")
if err != nil {
return upgradeFlags{}, err
}
dryRun, err := cmd.Flags().GetBool("dry-run")
if err != nil {
return upgradeFlags{}, err
}
verbose, err := cmd.Flags().GetBool("verbose")
if err != nil {
return upgradeFlags{}, err
}
logLevel := zapcore.InfoLevel
if verbose {
logLevel = zapcore.DebugLevel
}
region, err := cmd.Flags().GetString("region")
if err != nil {
return upgradeFlags{}, err
}
bucket, err := cmd.Flags().GetString("bucket")
if err != nil {
return upgradeFlags{}, err
}
mirrorBaseURL, err := cmd.Flags().GetString("mirror-base-url")
if err != nil {
return upgradeFlags{}, err
}
return upgradeFlags{
unauthenticated: unauthenticated,
dryRun: dryRun,
region: region,
bucket: bucket,
mirrorBaseURL: mirrorBaseURL,
logLevel: logLevel,
}, nil
}