mirror of
https://github.com/edgelesssys/constellation.git
synced 2025-08-07 22:42:22 -04:00
bazel: deps mirror (#1522)
bazel-deps-mirror is an internal tool used to upload external dependencies that are referenced in the Bazel WORKSPACE to the Edgeless Systems' mirror. It also normalizes deps rules. * hack: add tool to mirror Bazel dependencies * hack: bazel-deps-mirror tests * bazel: add deps mirror commands * ci: upload Bazel dependencies on renovate PRs * update go mod * run deps_mirror_upload Signed-off-by: Paul Meyer <49727155+katexochen@users.noreply.github.com> Co-authored-by: Paul Meyer <49727155+katexochen@users.noreply.github.com>
This commit is contained in:
parent
d3e2f30f7b
commit
827c4f548d
36 changed files with 2698 additions and 529 deletions
28
hack/bazel-deps-mirror/BUILD.bazel
Normal file
28
hack/bazel-deps-mirror/BUILD.bazel
Normal file
|
@ -0,0 +1,28 @@
|
|||
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
|
||||
|
||||
# Library with the CLI entry point and the check/fix subcommand implementations.
go_library(
    name = "bazel-deps-mirror_lib",
    srcs = [
        "bazel-deps-mirror.go",
        "check.go",
        "fix.go",
    ],
    importpath = "github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror",
    visibility = ["//visibility:private"],
    deps = [
        "//hack/bazel-deps-mirror/internal/bazelfiles",
        "//hack/bazel-deps-mirror/internal/issues",
        "//hack/bazel-deps-mirror/internal/mirror",
        "//hack/bazel-deps-mirror/internal/rules",
        "//internal/logger",
        "@com_github_bazelbuild_buildtools//build",
        "@com_github_spf13_cobra//:cobra",
        "@org_uber_go_zap//zapcore",
    ],
)
|
||||
|
||||
# Runnable binary embedding the library above; invoked via "bazel run".
go_binary(
    name = "bazel-deps-mirror",
    embed = [":bazel-deps-mirror_lib"],
    visibility = ["//visibility:public"],
)
|
9
hack/bazel-deps-mirror/README.md
Normal file
9
hack/bazel-deps-mirror/README.md
Normal file
|
@ -0,0 +1,9 @@
|
|||
# Bazel deps mirror
|
||||
|
||||
This directory contains tooling to automatically mirror the dependencies of a Bazel project into the Constellation CDN at `https://cdn.confidential.cloud/`.
|
||||
|
||||
The tool searches for various rules in the WORKSPACE.bazel file and all loaded .bzl files.
|
||||
It has the following commands:
|
||||
|
||||
- check: checks if the dependencies all have a mirror URL and optionally checks if the mirror really returns the expected file
|
||||
- mirror: mirrors all dependencies that don't have a mirror URL yet. Also normalizes the `urls` attribute of rules
|
82
hack/bazel-deps-mirror/bazel-deps-mirror.go
Normal file
82
hack/bazel-deps-mirror/bazel-deps-mirror.go
Normal file
|
@ -0,0 +1,82 @@
|
|||
/*
|
||||
Copyright (c) Edgeless Systems GmbH
|
||||
|
||||
SPDX-License-Identifier: AGPL-3.0-only
|
||||
*/
|
||||
|
||||
// bazel-deps-mirror adds external dependencies to edgeless systems' mirror.
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/signal"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func main() {
|
||||
if err := execute(); err != nil {
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func execute() error {
|
||||
rootCmd := newRootCmd()
|
||||
ctx, cancel := signalContext(context.Background(), os.Interrupt)
|
||||
defer cancel()
|
||||
return rootCmd.ExecuteContext(ctx)
|
||||
}
|
||||
|
||||
func newRootCmd() *cobra.Command {
|
||||
rootCmd := &cobra.Command{
|
||||
Use: "bazel-deps-mirror",
|
||||
Short: "Add external Bazel dependencies to edgeless systems' mirror",
|
||||
Long: "Add external Bazel dependencies to edgeless systems' mirror.",
|
||||
PersistentPreRun: preRunRoot,
|
||||
}
|
||||
|
||||
rootCmd.SetOut(os.Stdout)
|
||||
|
||||
rootCmd.PersistentFlags().Bool("verbose", false, "Enable verbose output")
|
||||
rootCmd.PersistentFlags().String("region", "eu-central-1", "AWS region of the API S3 bucket")
|
||||
rootCmd.PersistentFlags().String("bucket", "cdn-constellation-backend", "S3 bucket name of the API")
|
||||
rootCmd.PersistentFlags().String("mirror-base-url", "https://cdn.confidential.cloud", "Base URL of the public mirror endpoint")
|
||||
|
||||
rootCmd.AddCommand(newCheckCmd())
|
||||
rootCmd.AddCommand(newFixCmd())
|
||||
|
||||
return rootCmd
|
||||
}
|
||||
|
||||
// signalContext returns a context that is canceled on the handed signal.
// The signal isn't watched after its first occurrence. Call the cancel
// function to ensure the internal goroutine is stopped and the signal isn't
// watched any longer.
func signalContext(ctx context.Context, sig os.Signal) (context.Context, context.CancelFunc) {
	sigCtx, stop := signal.NotifyContext(ctx, sig)
	done := make(chan struct{}, 1)     // signals the watcher goroutine to shut down
	stopDone := make(chan struct{}, 1) // signals that the watcher goroutine has finished

	go func() {
		// Deferred calls run in reverse order: stop() executes first (ending
		// the signal watch), then stopDone is signaled so cancelFunc may return.
		defer func() { stopDone <- struct{}{} }()
		defer stop()
		select {
		case <-sigCtx.Done():
			// First signal received; a second one now hits the default handler.
			fmt.Println(" Signal caught. Press ctrl+c again to terminate the program immediately.")
		case <-done:
			// cancelFunc was called; shut down without printing.
		}
	}()

	cancelFunc := func() {
		done <- struct{}{}
		// Block until the goroutine has stopped and the signal is unwatched.
		<-stopDone
	}

	return sigCtx, cancelFunc
}
|
||||
|
||||
// preRunRoot is wired up as PersistentPreRun of the root command. It silences
// cobra's usage output on errors so failures are reported without the help text.
func preRunRoot(cmd *cobra.Command, _ []string) {
	cmd.SilenceUsage = true
}
|
184
hack/bazel-deps-mirror/check.go
Normal file
184
hack/bazel-deps-mirror/check.go
Normal file
|
@ -0,0 +1,184 @@
|
|||
/*
|
||||
Copyright (c) Edgeless Systems GmbH
|
||||
|
||||
SPDX-License-Identifier: AGPL-3.0-only
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
"github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/bazelfiles"
|
||||
"github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/issues"
|
||||
"github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/mirror"
|
||||
"github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/rules"
|
||||
"github.com/edgelesssys/constellation/v2/internal/logger"
|
||||
"github.com/spf13/cobra"
|
||||
"go.uber.org/zap/zapcore"
|
||||
)
|
||||
|
||||
func newCheckCmd() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "check",
|
||||
Short: "Check if all Bazel dependencies are mirrored and the corresponding rules are properly formatted.",
|
||||
RunE: runCheck,
|
||||
}
|
||||
|
||||
cmd.Flags().Bool("mirror", false, "Performs authenticated checks to validate if all referenced CAS objects are still consistent within the mirror.")
|
||||
cmd.Flags().Bool("mirror-unauthenticated", false, "Performs unauthenticated, slow checks to validate if all referenced CAS objects are still consistent within the mirror. Doesn't require authentication to the mirror but may be inefficient.")
|
||||
cmd.MarkFlagsMutuallyExclusive("mirror", "mirror-unauthenticated")
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// runCheck is the RunE of the check command. It scans all Bazel files in the
// workspace, validates every supported dependency rule, and optionally
// verifies the mirrored artifacts. It returns an error (and prints a report)
// if any rule has issues.
func runCheck(cmd *cobra.Command, _ []string) error {
	flags, err := parseCheckFlags(cmd)
	if err != nil {
		return err
	}
	log := logger.New(logger.PlainLog, flags.logLevel)
	log.Debugf("Parsed flags: %+v", flags)

	filesHelper, err := bazelfiles.New()
	if err != nil {
		return err
	}

	log.Debugf("Searching for Bazel files in the current WORKSPACE and all subdirectories...")
	bazelFiles, err := filesHelper.FindFiles()
	if err != nil {
		return err
	}

	// Select the mirror checker based on the flags; without --mirror or
	// --mirror-unauthenticated no artifact consistency checks are performed.
	var mirrorCheck mirrorChecker
	switch {
	case flags.mirrorUnauthenticated:
		log.Debugf("Checking consistency of all referenced CAS objects without authentication.")
		mirrorCheck = mirror.NewUnauthenticated(flags.mirrorBaseURL, mirror.Run, log)
	case flags.mirror:
		log.Debugf("Checking consistency of all referenced CAS objects using AWS S3.")
		mirrorCheck, err = mirror.New(cmd.Context(), flags.region, flags.bucket, flags.mirrorBaseURL, mirror.Run, log)
		if err != nil {
			return err
		}
	default:
		mirrorCheck = &noOpMirrorChecker{}
	}

	// Collect issues per file; a single bad rule must not stop the scan.
	iss := issues.New()
	for _, bazelFile := range bazelFiles {
		issByFile, err := checkBazelFile(cmd.Context(), filesHelper, mirrorCheck, bazelFile, log)
		if err != nil {
			return err
		}
		if len(issByFile) > 0 {
			iss.Set(bazelFile.AbsPath, issByFile)
		}
	}
	if len(iss) > 0 {
		log.Infof("Found issues in rules")
		iss.Report(cmd.OutOrStdout())
		return errors.New("found issues in rules")
	}

	log.Infof("No issues found 🦭")
	return nil
}
|
||||
|
||||
func checkBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorCheck mirrorChecker, bazelFile bazelfiles.BazelFile, log *logger.Logger) (issByFile issues.ByFile, err error) {
|
||||
log.Debugf("Checking file: %s", bazelFile.RelPath)
|
||||
issByFile = issues.NewByFile()
|
||||
buildfile, err := fileHelper.LoadFile(bazelFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
found := rules.Rules(buildfile, rules.SupportedRules)
|
||||
if len(found) == 0 {
|
||||
log.Debugf("No rules found in file: %s", bazelFile.RelPath)
|
||||
return
|
||||
}
|
||||
log.Debugf("Found %d rules in file: %s", len(found), bazelFile.RelPath)
|
||||
for _, rule := range found {
|
||||
log.Debugf("Checking rule: %s", rule.Name())
|
||||
// check if the rule is a valid pinned dependency rule (has all required attributes)
|
||||
if issues := rules.ValidatePinned(rule); len(issues) > 0 {
|
||||
issByFile.Add(rule.Name(), issues...)
|
||||
continue
|
||||
}
|
||||
// check if the rule is a valid mirror rule
|
||||
if issues := rules.Check(rule); len(issues) > 0 {
|
||||
issByFile.Add(rule.Name(), issues...)
|
||||
}
|
||||
|
||||
// check if the referenced CAS object is still consistent
|
||||
// may be a no-op if --check-consistency is not set
|
||||
expectedHash, expectedHashErr := rules.GetHash(rule)
|
||||
if expectedHashErr == nil && rules.HasMirrorURL(rule) {
|
||||
if issue := mirrorCheck.Check(ctx, expectedHash); issue != nil {
|
||||
issByFile.Add(rule.Name(), issue)
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// checkFlags holds the parsed command line flags of the check command.
type checkFlags struct {
	mirrorUnauthenticated bool          // verify mirrored artifacts without AWS credentials
	mirror                bool          // verify mirrored artifacts via authenticated AWS S3 access
	region                string        // AWS region of the API S3 bucket
	bucket                string        // S3 bucket name of the API
	mirrorBaseURL         string        // base URL of the public mirror endpoint
	logLevel              zapcore.Level // derived from --verbose: debug when set, info otherwise
}
|
||||
|
||||
// parseCheckFlags reads all flags relevant for the check command from cmd
// (including the persistent flags registered on the root command) and returns
// them bundled in a checkFlags struct.
func parseCheckFlags(cmd *cobra.Command) (checkFlags, error) {
	mirrorUnauthenticated, err := cmd.Flags().GetBool("mirror-unauthenticated")
	if err != nil {
		return checkFlags{}, err
	}
	mirror, err := cmd.Flags().GetBool("mirror")
	if err != nil {
		return checkFlags{}, err
	}
	verbose, err := cmd.Flags().GetBool("verbose")
	if err != nil {
		return checkFlags{}, err
	}
	// --verbose raises the log level from info to debug.
	logLevel := zapcore.InfoLevel
	if verbose {
		logLevel = zapcore.DebugLevel
	}
	region, err := cmd.Flags().GetString("region")
	if err != nil {
		return checkFlags{}, err
	}
	bucket, err := cmd.Flags().GetString("bucket")
	if err != nil {
		return checkFlags{}, err
	}
	mirrorBaseURL, err := cmd.Flags().GetString("mirror-base-url")
	if err != nil {
		return checkFlags{}, err
	}

	return checkFlags{
		mirrorUnauthenticated: mirrorUnauthenticated,
		mirror:                mirror,
		region:                region,
		bucket:                bucket,
		mirrorBaseURL:         mirrorBaseURL,
		logLevel:              logLevel,
	}, nil
}
|
||||
|
||||
// mirrorChecker verifies that an artifact referenced by its hash is present
// and consistent in the mirror.
type mirrorChecker interface {
	// Check returns nil if the mirror serves the artifact with the expected hash.
	Check(ctx context.Context, expectedHash string) error
}
|
||||
|
||||
type noOpMirrorChecker struct{}
|
||||
|
||||
func (m *noOpMirrorChecker) Check(ctx context.Context, expectedHash string) error {
|
||||
return nil
|
||||
}
|
230
hack/bazel-deps-mirror/fix.go
Normal file
230
hack/bazel-deps-mirror/fix.go
Normal file
|
@ -0,0 +1,230 @@
|
|||
/*
|
||||
Copyright (c) Edgeless Systems GmbH
|
||||
|
||||
SPDX-License-Identifier: AGPL-3.0-only
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
"github.com/bazelbuild/buildtools/build"
|
||||
"github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/bazelfiles"
|
||||
"github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/issues"
|
||||
"github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/mirror"
|
||||
"github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/rules"
|
||||
"github.com/edgelesssys/constellation/v2/internal/logger"
|
||||
"github.com/spf13/cobra"
|
||||
"go.uber.org/zap/zapcore"
|
||||
)
|
||||
|
||||
func newFixCmd() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "fix",
|
||||
Short: "fix all Bazel dependency rules by uploading artifacts to the mirror (if needed) and formatting the rules.",
|
||||
RunE: runFix,
|
||||
}
|
||||
|
||||
cmd.Flags().Bool("unauthenticated", false, "Doesn't require authentication to the mirror but cannot upload files.")
|
||||
cmd.Flags().Bool("dry-run", false, "Don't actually change files or upload anything.")
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func runFix(cmd *cobra.Command, _ []string) error {
|
||||
flags, err := parseFixFlags(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log := logger.New(logger.PlainLog, flags.logLevel)
|
||||
log.Debugf("Parsed flags: %+v", flags)
|
||||
|
||||
fileHelper, err := bazelfiles.New()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Debugf("Searching for Bazel files in the current WORKSPACE and all subdirectories...")
|
||||
bazelFiles, err := fileHelper.FindFiles()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var mirrorUpload mirrorUploader
|
||||
switch {
|
||||
case flags.unauthenticated:
|
||||
log.Warnf("Fixing rules without authentication for AWS S3. If artifacts are not yet mirrored, this will fail.")
|
||||
mirrorUpload = mirror.NewUnauthenticated(flags.mirrorBaseURL, flags.dryRun, log)
|
||||
default:
|
||||
log.Debugf("Fixing rules with authentication for AWS S3.")
|
||||
mirrorUpload, err = mirror.New(cmd.Context(), flags.region, flags.bucket, flags.mirrorBaseURL, flags.dryRun, log)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
issues := issues.New()
|
||||
for _, bazelFile := range bazelFiles {
|
||||
fileIssues, err := fixBazelFile(cmd.Context(), fileHelper, mirrorUpload, bazelFile, flags.dryRun, log)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(fileIssues) > 0 {
|
||||
issues.Set(bazelFile.AbsPath, fileIssues)
|
||||
}
|
||||
}
|
||||
if len(issues) > 0 {
|
||||
log.Warnf("Found %d unfixable issues in rules", len(issues))
|
||||
issues.Report(cmd.OutOrStdout())
|
||||
return errors.New("found issues in rules")
|
||||
}
|
||||
|
||||
log.Infof("No unfixable issues found")
|
||||
return nil
|
||||
}
|
||||
|
||||
// fixBazelFile fixes all supported dependency rules in a single Bazel file.
// The file is only written back when every rule could be fixed and at least
// one rule actually changed; in dry-run mode the diff is logged instead.
// Unfixable problems are returned as issues, not as an error.
func fixBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorUpload mirrorUploader, bazelFile bazelfiles.BazelFile, dryRun bool, log *logger.Logger) (iss issues.ByFile, err error) {
	iss = issues.NewByFile()
	var changed bool // true if any rule in this file was changed
	log.Infof("Checking file: %s", bazelFile.RelPath)
	buildfile, err := fileHelper.LoadFile(bazelFile)
	if err != nil {
		return iss, err
	}
	found := rules.Rules(buildfile, rules.SupportedRules)
	if len(found) == 0 {
		log.Debugf("No rules found in file: %s", bazelFile.RelPath)
		return iss, nil
	}
	log.Debugf("Found %d rules in file: %s", len(found), bazelFile.RelPath)
	for _, rule := range found {
		changedRule, ruleIssues := fixRule(ctx, mirrorUpload, rule, log)
		if len(ruleIssues) > 0 {
			iss.Add(rule.Name(), ruleIssues...)
		}
		changed = changed || changedRule
	}

	// Don't persist partial fixes: any remaining issue keeps the file untouched.
	if len(iss) > 0 {
		log.Warnf("File %s has issues. Not saving!", bazelFile.RelPath)
		return iss, nil
	}
	if !changed {
		log.Debugf("No changes to file: %s", bazelFile.RelPath)
		return iss, nil
	}
	if dryRun {
		// Show what would be written without touching the file.
		diff, err := fileHelper.Diff(bazelFile, buildfile)
		if err != nil {
			return iss, err
		}
		log.Infof("Dry run: would save updated file %s with diff:\n%s", bazelFile.RelPath, diff)
		return iss, nil
	}
	log.Infof("Saving updated file: %s", bazelFile.RelPath)
	if err := fileHelper.WriteFile(bazelFile, buildfile); err != nil {
		return iss, err
	}

	return iss, nil
}
|
||||
|
||||
// fixRule fixes a single dependency rule: it uploads the artifact to the
// mirror if it is not mirrored yet, adds the mirror URL to the rule, and
// normalizes the rule's attributes. It reports whether the rule was modified
// and any problems that prevented fixing it.
func fixRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.Rule, log *logger.Logger) (changed bool, iss []error) {
	log.Debugf("Fixing rule: %s", rule.Name())
	// check if the rule is a valid pinned dependency rule (has all required attributes)
	issue := rules.ValidatePinned(rule)
	if issue != nil {
		// don't try to fix the rule if it's invalid
		iss = append(iss, issue...)
		return
	}

	// check if the referenced CAS object exists in the mirror and is consistent
	expectedHash, expectedHashErr := rules.GetHash(rule)
	if expectedHashErr != nil {
		// don't try to fix the rule if the hash is missing
		iss = append(iss,
			errors.New("hash attribute is missing. unable to check if the artifact is already mirrored or upload it"))
		return
	}

	// Already mirrored: only normalization may still change the rule.
	if rules.HasMirrorURL(rule) {
		changed = rules.Normalize(rule)
		return
	}

	log.Infof("Artifact %s with hash %s is not yet mirrored. Uploading...", rule.Name(), expectedHash)
	if uploadErr := mirrorUpload.Mirror(ctx, expectedHash, rules.GetURLs(rule)); uploadErr != nil {
		// don't try to fix the rule if the upload failed
		iss = append(iss, uploadErr)
		return
	}
	// now the artifact is mirrored (if it wasn't already) and we can fix the rule
	mirrorURL, err := mirrorUpload.MirrorURL(expectedHash)
	if err != nil {
		iss = append(iss, err)
		return
	}
	rules.AddURLs(rule, []string{mirrorURL})

	// normalize the rule
	rules.Normalize(rule)
	return true, iss
}
|
||||
|
||||
// fixFlags holds the parsed command line flags of the fix command.
type fixFlags struct {
	unauthenticated bool          // skip AWS authentication; uploads of missing artifacts will fail
	dryRun          bool          // don't modify files or upload anything
	region          string        // AWS region of the API S3 bucket
	bucket          string        // S3 bucket name of the API
	mirrorBaseURL   string        // base URL of the public mirror endpoint
	logLevel        zapcore.Level // derived from --verbose: debug when set, info otherwise
}
|
||||
|
||||
// parseFixFlags reads all flags relevant for the fix command from cmd
// (including the persistent flags registered on the root command) and returns
// them bundled in a fixFlags struct.
func parseFixFlags(cmd *cobra.Command) (fixFlags, error) {
	unauthenticated, err := cmd.Flags().GetBool("unauthenticated")
	if err != nil {
		return fixFlags{}, err
	}
	dryRun, err := cmd.Flags().GetBool("dry-run")
	if err != nil {
		return fixFlags{}, err
	}
	verbose, err := cmd.Flags().GetBool("verbose")
	if err != nil {
		return fixFlags{}, err
	}
	// --verbose raises the log level from info to debug.
	logLevel := zapcore.InfoLevel
	if verbose {
		logLevel = zapcore.DebugLevel
	}
	region, err := cmd.Flags().GetString("region")
	if err != nil {
		return fixFlags{}, err
	}
	bucket, err := cmd.Flags().GetString("bucket")
	if err != nil {
		return fixFlags{}, err
	}
	mirrorBaseURL, err := cmd.Flags().GetString("mirror-base-url")
	if err != nil {
		return fixFlags{}, err
	}

	return fixFlags{
		unauthenticated: unauthenticated,
		dryRun:          dryRun,
		region:          region,
		bucket:          bucket,
		mirrorBaseURL:   mirrorBaseURL,
		logLevel:        logLevel,
	}, nil
}
|
||||
|
||||
// mirrorUploader checks, uploads, and resolves URLs for artifacts in the mirror.
type mirrorUploader interface {
	// Check returns nil if the mirror serves the artifact with the expected hash.
	Check(ctx context.Context, expectedHash string) error
	// Mirror uploads the artifact identified by hash, fetching it from urls.
	Mirror(ctx context.Context, hash string, urls []string) error
	// MirrorURL returns the public mirror URL for the artifact with the given hash.
	MirrorURL(hash string) (string, error)
}
|
30
hack/bazel-deps-mirror/internal/bazelfiles/BUILD.bazel
Normal file
30
hack/bazel-deps-mirror/internal/bazelfiles/BUILD.bazel
Normal file
|
@ -0,0 +1,30 @@
|
|||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
load("//bazel/go:go_test.bzl", "go_test")
|
||||
|
||||
# Library for locating, parsing, diffing, and writing WORKSPACE/.bzl files.
go_library(
    name = "bazelfiles",
    srcs = ["files.go"],
    importpath = "github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/bazelfiles",
    visibility = ["//hack/bazel-deps-mirror:__subpackages__"],
    deps = [
        "@com_github_bazelbuild_buildtools//build",
        "@com_github_hexops_gotextdiff//:gotextdiff",
        "@com_github_hexops_gotextdiff//myers",
        "@com_github_hexops_gotextdiff//span",
        "@com_github_spf13_afero//:afero",
    ],
)
|
||||
|
||||
# Unit tests for the bazelfiles package.
go_test(
    name = "bazelfiles_test",
    srcs = ["files_test.go"],
    embed = [":bazelfiles"],
    deps = [
        "@com_github_bazelbuild_buildtools//build",
        "@com_github_bazelbuild_buildtools//edit",
        "@com_github_spf13_afero//:afero",
        "@com_github_stretchr_testify//assert",
        "@com_github_stretchr_testify//require",
        "@org_uber_go_goleak//:goleak",
    ],
)
|
158
hack/bazel-deps-mirror/internal/bazelfiles/files.go
Normal file
158
hack/bazel-deps-mirror/internal/bazelfiles/files.go
Normal file
|
@ -0,0 +1,158 @@
|
|||
/*
|
||||
Copyright (c) Edgeless Systems GmbH
|
||||
|
||||
SPDX-License-Identifier: AGPL-3.0-only
|
||||
*/
|
||||
|
||||
// package bazelfiles is used to find and handle Bazel WORKSPACE and bzl files.
|
||||
package bazelfiles
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/bazelbuild/buildtools/build"
|
||||
"github.com/hexops/gotextdiff"
|
||||
"github.com/hexops/gotextdiff/myers"
|
||||
"github.com/hexops/gotextdiff/span"
|
||||
"github.com/spf13/afero"
|
||||
)
|
||||
|
||||
// Helper is used to find and handle Bazel WORKSPACE and bzl files.
type Helper struct {
	fs            afero.Fs // filesystem rooted at the workspace; paths are relative to it
	workspaceRoot string   // absolute path of the workspace root, used to build AbsPath values
}
|
||||
|
||||
// New creates a new BazelFilesHelper.
|
||||
func New() (*Helper, error) {
|
||||
workspaceRoot, err := findWorkspaceRoot(os.LookupEnv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Helper{
|
||||
fs: afero.NewBasePathFs(afero.NewOsFs(), workspaceRoot),
|
||||
workspaceRoot: workspaceRoot,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// FindFiles returns the paths to all Bazel files in the Bazel workspace.
|
||||
func (h *Helper) FindFiles() ([]BazelFile, error) {
|
||||
workspaceFile, err := h.findWorkspaceFile()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
bzlFiles, err := h.findBzlFiles()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return append(bzlFiles, workspaceFile), nil
|
||||
}
|
||||
|
||||
// findWorkspaceFile returns the path to the Bazel WORKSPACE.bazel file (or WORKSPACE if the former doesn't exist).
|
||||
func (h *Helper) findWorkspaceFile() (BazelFile, error) {
|
||||
if _, err := h.fs.Stat("WORKSPACE.bazel"); err == nil {
|
||||
return BazelFile{
|
||||
RelPath: "WORKSPACE.bazel",
|
||||
AbsPath: filepath.Join(h.workspaceRoot, "WORKSPACE.bazel"),
|
||||
Type: BazelFileTypeWorkspace,
|
||||
}, nil
|
||||
}
|
||||
if _, err := h.fs.Stat("WORKSPACE"); err == nil {
|
||||
return BazelFile{
|
||||
RelPath: "WORKSPACE",
|
||||
AbsPath: filepath.Join(h.workspaceRoot, "WORKSPACE"),
|
||||
Type: BazelFileTypeWorkspace,
|
||||
}, nil
|
||||
}
|
||||
return BazelFile{}, fmt.Errorf("failed to find Bazel WORKSPACE file")
|
||||
}
|
||||
|
||||
// findBzlFiles returns the paths to all .bzl files in the Bazel workspace.
func (h *Helper) findBzlFiles() ([]BazelFile, error) {
	var bzlFiles []BazelFile
	// Walk the whole workspace; h.fs is rooted at the workspace, so "." is the root.
	err := afero.Walk(h.fs, ".", func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if info.IsDir() {
			return nil
		}
		// Only collect files with a .bzl extension.
		if filepath.Ext(path) != ".bzl" {
			return nil
		}
		bzlFiles = append(bzlFiles, BazelFile{
			RelPath: path,
			AbsPath: filepath.Join(h.workspaceRoot, path),
			Type:    BazelFileTypeBzl,
		})
		return nil
	})
	if err != nil {
		return nil, err
	}
	return bzlFiles, nil
}
|
||||
|
||||
// LoadFile loads a Bazel file.
|
||||
func (h *Helper) LoadFile(bf BazelFile) (*build.File, error) {
|
||||
data, err := afero.ReadFile(h.fs, bf.RelPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch bf.Type {
|
||||
case BazelFileTypeBzl:
|
||||
return build.ParseBzl(bf.AbsPath, data)
|
||||
case BazelFileTypeWorkspace:
|
||||
return build.ParseWorkspace(bf.AbsPath, data)
|
||||
}
|
||||
return nil, fmt.Errorf("unknown Bazel file type: %d", bf.Type)
|
||||
}
|
||||
|
||||
// WriteFile writes (updates) a Bazel file, formatting the syntax tree with
// the canonical buildifier formatting.
func (h *Helper) WriteFile(bf BazelFile, buildfile *build.File) error {
	return afero.WriteFile(h.fs, bf.RelPath, build.Format(buildfile), 0o644)
}
|
||||
|
||||
// Diff returns the diff between the saved and the updated (in-memory) version of a Bazel file.
// The result is a unified diff ("a/<path>" vs "b/<path>"); it is empty when
// the formatted in-memory file equals the file on disk.
func (h *Helper) Diff(bf BazelFile, buildfile *build.File) (string, error) {
	savedData, err := afero.ReadFile(h.fs, bf.RelPath)
	if err != nil {
		return "", err
	}
	updatedData := build.Format(buildfile)
	edits := myers.ComputeEdits(span.URIFromPath(bf.RelPath), string(savedData), string(updatedData))
	diff := fmt.Sprint(gotextdiff.ToUnified("a/"+bf.RelPath, "b/"+bf.RelPath, string(savedData), edits))
	return diff, nil
}
|
||||
|
||||
// findWorkspaceRoot returns the path to the Bazel workspace root.
|
||||
func findWorkspaceRoot(lookupEnv LookupEnv) (string, error) {
|
||||
workspaceRoot, ok := lookupEnv("BUILD_WORKSPACE_DIRECTORY")
|
||||
if !ok {
|
||||
return "", fmt.Errorf("failed to find Bazel workspace root: not executed via \"bazel run\" and BUILD_WORKSPACE_DIRECTORY not set")
|
||||
}
|
||||
return workspaceRoot, nil
|
||||
}
|
||||
|
||||
// BazelFile is a reference (path) to a Bazel file.
type BazelFile struct {
	RelPath string        // path relative to the workspace root
	AbsPath string        // absolute path, workspaceRoot joined with RelPath
	Type    BazelFileType // .bzl vs WORKSPACE, decides which parser is used
}
|
||||
|
||||
// BazelFileType is the type of a Bazel file.
type BazelFileType int

// The constants are explicitly typed as BazelFileType; with a bare
// "= iota" they would be untyped ints and lose the type's safety.
const (
	// BazelFileTypeBzl is a .bzl file.
	BazelFileTypeBzl BazelFileType = iota
	// BazelFileTypeWorkspace is a WORKSPACE or WORKSPACE.bazel file.
	BazelFileTypeWorkspace
)
|
||||
|
||||
// LookupEnv can be the real os.LookupEnv or a mock for testing.
// It returns the value of the environment variable and whether it is set.
type LookupEnv func(key string) (string, bool)
|
259
hack/bazel-deps-mirror/internal/bazelfiles/files_test.go
Normal file
259
hack/bazel-deps-mirror/internal/bazelfiles/files_test.go
Normal file
|
@ -0,0 +1,259 @@
|
|||
/*
|
||||
Copyright (c) Edgeless Systems GmbH
|
||||
|
||||
SPDX-License-Identifier: AGPL-3.0-only
|
||||
*/
|
||||
|
||||
package bazelfiles
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/bazelbuild/buildtools/build"
|
||||
"github.com/bazelbuild/buildtools/edit"
|
||||
"github.com/spf13/afero"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/goleak"
|
||||
)
|
||||
|
||||
// TestMain verifies that no test in this package leaks goroutines.
func TestMain(m *testing.M) {
	goleak.VerifyTestMain(m)
}
|
||||
|
||||
// TestFindFiles exercises Helper.FindFiles against in-memory filesystems with
// different combinations of WORKSPACE, WORKSPACE.bazel, and .bzl files.
func TestFindFiles(t *testing.T) {
	testCases := map[string]struct {
		files     []string    // files created in the in-memory fs
		wantFiles []BazelFile // expected result (order-independent)
		wantErr   bool
	}{
		"no WORKSPACE file": {
			files:     []string{},
			wantFiles: []BazelFile{},
			wantErr:   true,
		},
		"only WORKSPACE file": {
			files: []string{"WORKSPACE"},
			wantFiles: []BazelFile{
				{
					RelPath: "WORKSPACE",
					AbsPath: "/WORKSPACE",
					Type:    BazelFileTypeWorkspace,
				},
			},
		},
		"only WORKSPACE.bazel file": {
			files: []string{"WORKSPACE.bazel"},
			wantFiles: []BazelFile{
				{
					RelPath: "WORKSPACE.bazel",
					AbsPath: "/WORKSPACE.bazel",
					Type:    BazelFileTypeWorkspace,
				},
			},
		},
		// WORKSPACE.bazel wins when both exist.
		"both WORKSPACE and WORKSPACE.bazel files": {
			files: []string{"WORKSPACE", "WORKSPACE.bazel"},
			wantFiles: []BazelFile{
				{
					RelPath: "WORKSPACE.bazel",
					AbsPath: "/WORKSPACE.bazel",
					Type:    BazelFileTypeWorkspace,
				},
			},
		},
		// A workspace file is mandatory; .bzl files alone are an error.
		"only .bzl file": {
			files:   []string{"foo.bzl"},
			wantErr: true,
		},
		// Non-.bzl files (unused.txt) are ignored; subdirectories are walked.
		"all kinds": {
			files: []string{"WORKSPACE", "WORKSPACE.bazel", "foo.bzl", "bar.bzl", "unused.txt", "folder/baz.bzl"},
			wantFiles: []BazelFile{
				{
					RelPath: "WORKSPACE.bazel",
					AbsPath: "/WORKSPACE.bazel",
					Type:    BazelFileTypeWorkspace,
				},
				{
					RelPath: "foo.bzl",
					AbsPath: "/foo.bzl",
					Type:    BazelFileTypeBzl,
				},
				{
					RelPath: "bar.bzl",
					AbsPath: "/bar.bzl",
					Type:    BazelFileTypeBzl,
				},
				{
					RelPath: "folder/baz.bzl",
					AbsPath: "/folder/baz.bzl",
					Type:    BazelFileTypeBzl,
				},
			},
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)
			require := require.New(t)
			fs := afero.NewMemMapFs()
			for _, file := range tc.files {
				_, err := fs.Create(file)
				assert.NoError(err)
			}

			helper := Helper{
				fs:            fs,
				workspaceRoot: "/",
			}
			gotFiles, err := helper.FindFiles()
			if tc.wantErr {
				assert.Error(err)
				return
			}
			require.NoError(err)
			assert.ElementsMatch(tc.wantFiles, gotFiles)
		})
	}
}
|
||||
|
||||
// TestLoadFile exercises Helper.LoadFile: missing files and unknown file
// types must error, while valid .bzl and WORKSPACE contents must parse.
func TestLoadFile(t *testing.T) {
	testCases := map[string]struct {
		file     BazelFile
		contents string // written to the fs before the call; empty means no file
		wantErr  bool
	}{
		"file does not exist": {
			file: BazelFile{
				RelPath: "foo.bzl",
				AbsPath: "/foo.bzl",
				Type:    BazelFileTypeBzl,
			},
			wantErr: true,
		},
		"file has unknown type": {
			file: BazelFile{
				RelPath: "foo.txt",
				AbsPath: "/foo.txt",
				Type:    BazelFileType(999),
			},
			contents: "foo",
			wantErr:  true,
		},
		"file is a bzl file": {
			file: BazelFile{
				RelPath: "foo.bzl",
				AbsPath: "/foo.bzl",
				Type:    BazelFileTypeBzl,
			},
			contents: "load(\"bar.bzl\", \"bar\")",
		},
		"file is a workspace file": {
			file: BazelFile{
				RelPath: "WORKSPACE",
				AbsPath: "/WORKSPACE",
				Type:    BazelFileTypeWorkspace,
			},
			contents: "workspace(name = \"foo\")",
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)
			require := require.New(t)
			fs := afero.NewMemMapFs()
			if tc.contents != "" {
				err := afero.WriteFile(fs, tc.file.RelPath, []byte(tc.contents), 0o644)
				require.NoError(err)
			}

			helper := Helper{
				fs:            fs,
				workspaceRoot: "/",
			}
			_, err := helper.LoadFile(tc.file)
			if tc.wantErr {
				assert.Error(err)
				return
			}
			require.NoError(err)
		})
	}
}
|
||||
|
||||
// TestReadWriteFile round-trips a .bzl file: loading a.bzl and writing the
// parsed tree to b.bzl must reproduce the original (formatted) contents.
func TestReadWriteFile(t *testing.T) {
	assert := assert.New(t)
	require := require.New(t)
	fs := afero.NewMemMapFs()
	err := afero.WriteFile(fs, "a.bzl", []byte("load(\"bar.bzl\", \"bar\")\n"), 0o644)
	require.NoError(err)
	helper := Helper{
		fs:            fs,
		workspaceRoot: "/",
	}
	bf, err := helper.LoadFile(BazelFile{
		RelPath: "a.bzl",
		AbsPath: "/a.bzl",
		Type:    BazelFileTypeBzl,
	})
	require.NoError(err)
	// Write the parsed tree to a different path.
	err = helper.WriteFile(BazelFile{
		RelPath: "b.bzl",
		AbsPath: "/b.bzl",
		Type:    BazelFileTypeBzl,
	}, bf)
	require.NoError(err)
	_, err = fs.Stat("b.bzl")
	assert.NoError(err)
	contents, err := afero.ReadFile(fs, "b.bzl")
	assert.NoError(err)
	assert.Equal("load(\"bar.bzl\", \"bar\")\n", string(contents))
}
|
||||
|
||||
// TestDiff verifies Helper.Diff: an unchanged file diffs empty, an in-memory
// edit produces a unified diff against the on-disk file, and writing the edit
// back makes the diff empty again.
func TestDiff(t *testing.T) {
	assert := assert.New(t)
	require := require.New(t)
	fs := afero.NewMemMapFs()
	// Start from an empty WORKSPACE.bazel on disk.
	err := afero.WriteFile(fs, "WORKSPACE.bazel", []byte(""), 0o644)
	require.NoError(err)
	helper := Helper{
		fs:            fs,
		workspaceRoot: "/",
	}
	fileRef := BazelFile{
		RelPath: "WORKSPACE.bazel",
		AbsPath: "/WORKSPACE.bazel",
		Type:    BazelFileTypeWorkspace,
	}
	bf, err := helper.LoadFile(fileRef)
	require.NoError(err)
	// No in-memory changes yet: diff must be empty.
	diff, err := helper.Diff(fileRef, bf)
	require.NoError(err)
	assert.Empty(diff)
	// Append `workspace(name = "foo")` to the in-memory AST only.
	bf.Stmt = edit.InsertAtEnd(
		bf.Stmt,
		&build.CallExpr{
			X: &build.Ident{Name: "workspace"},
			List: []build.Expr{
				&build.AssignExpr{
					LHS: &build.Ident{Name: "name"},
					Op:  "=",
					RHS: &build.StringExpr{Value: "foo"},
				},
			},
		},
	)
	// The diff now shows the added line in unified format.
	diff, err = helper.Diff(fileRef, bf)
	require.NoError(err)
	assert.Equal("--- a/WORKSPACE.bazel\n+++ b/WORKSPACE.bazel\n@@ -1 +1 @@\n+workspace(name = \"foo\")\n", diff)
	// Persist the change and confirm disk content matches the AST.
	err = helper.WriteFile(fileRef, bf)
	require.NoError(err)
	contents, err := afero.ReadFile(fs, "WORKSPACE.bazel")
	assert.NoError(err)
	assert.Equal("workspace(name = \"foo\")\n", string(contents))
	// Disk and AST agree again: diff is empty.
	diff, err = helper.Diff(fileRef, bf)
	require.NoError(err)
	assert.Empty(diff)
}
|
19
hack/bazel-deps-mirror/internal/issues/BUILD.bazel
Normal file
19
hack/bazel-deps-mirror/internal/issues/BUILD.bazel
Normal file
|
@ -0,0 +1,19 @@
|
|||
# Build targets for the issues helper package.
# NOTE(review): this file looks gazelle-generated — regenerate rather than
# hand-editing srcs/deps; confirm with the repo's gazelle setup.
load("@io_bazel_rules_go//go:def.bzl", "go_library")
load("//bazel/go:go_test.bzl", "go_test")

go_library(
    name = "issues",
    srcs = ["issues.go"],
    importpath = "github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/issues",
    visibility = ["//hack/bazel-deps-mirror:__subpackages__"],
)

go_test(
    name = "issues_test",
    srcs = ["issues_test.go"],
    embed = [":issues"],
    deps = [
        "@com_github_stretchr_testify//assert",
        "@org_uber_go_goleak//:goleak",
    ],
)
|
88
hack/bazel-deps-mirror/internal/issues/issues.go
Normal file
88
hack/bazel-deps-mirror/internal/issues/issues.go
Normal file
|
@ -0,0 +1,88 @@
|
|||
/*
|
||||
Copyright (c) Edgeless Systems GmbH
|
||||
|
||||
SPDX-License-Identifier: AGPL-3.0-only
|
||||
*/
|
||||
|
||||
// Package issues can store and report issues found during the bazel-deps-mirror process.
|
||||
package issues
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// Map is a map of issues arranged by path => rulename => issues.
type Map map[string]map[string][]error

// New creates a new, empty Map.
func New() Map {
	return make(map[string]map[string][]error)
}

// Set sets all issues for a file, replacing any previously stored set.
func (m Map) Set(file string, issues ByFile) {
	m[file] = issues
}

// Report prints all issues to a writer in a human-readable format.
// Files and rules are sorted so the output is deterministic.
func (m Map) Report(w io.Writer) {
	files := make([]string, 0, len(m))
	for f := range m {
		files = append(files, f)
	}
	sort.Strings(files)

	for _, file := range files {
		rules := make([]string, 0, len(m[file]))
		for r := range m[file] {
			rules = append(rules, r)
		}
		sort.Strings(rules)

		fmt.Fprintf(w, "File %s (%d issues total):\n", file, m.IssuesPerFile(file))
		for _, rule := range rules {
			ruleIssues := m[file][rule]
			// Skip rules that were registered but collected no issues.
			if len(ruleIssues) == 0 {
				continue
			}
			fmt.Fprintf(w, "  Rule %s (%d issues total):\n", rule, m.IssuesPerRule(file, rule))
			for _, issue := range ruleIssues {
				fmt.Fprintf(w, "    %s\n", issue)
			}
		}
	}
}

// FileHasIssues returns true if the file has any issues.
// Bug fix: the previous implementation returned m[file] != nil, which wrongly
// reported true for a file registered with an empty (but non-nil) issue set.
func (m Map) FileHasIssues(file string) bool {
	return m.IssuesPerFile(file) > 0
}

// IssuesPerFile returns the number of issues for a file, summed over all rules.
func (m Map) IssuesPerFile(file string) int {
	sum := 0
	for _, ruleIssues := range m[file] {
		sum += len(ruleIssues)
	}
	return sum
}

// IssuesPerRule returns the number of issues for a rule within a file.
func (m Map) IssuesPerRule(file string, rule string) int {
	return len(m[file][rule])
}

// ByFile is a map of issues belonging to one file arranged by rulename => issues.
type ByFile map[string][]error

// NewByFile creates a new, empty ByFile.
func NewByFile() ByFile {
	return make(map[string][]error)
}

// Add adds one or more issues belonging to a rule.
func (m ByFile) Add(rule string, issues ...error) {
	m[rule] = append(m[rule], issues...)
}
|
46
hack/bazel-deps-mirror/internal/issues/issues_test.go
Normal file
46
hack/bazel-deps-mirror/internal/issues/issues_test.go
Normal file
|
@ -0,0 +1,46 @@
|
|||
/*
|
||||
Copyright (c) Edgeless Systems GmbH
|
||||
|
||||
SPDX-License-Identifier: AGPL-3.0-only
|
||||
*/
|
||||
|
||||
package issues
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"go.uber.org/goleak"
|
||||
)
|
||||
|
||||
// TestMain fails the package's tests if any test leaks a goroutine.
func TestMain(m *testing.M) {
	goleak.VerifyTestMain(m)
}
|
||||
|
||||
func TestMap(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
m := New()
|
||||
assert.Equal(0, len(m))
|
||||
assert.False(m.FileHasIssues("file1"))
|
||||
m.Set("file1", map[string][]error{
|
||||
"rule1": {errors.New("r1_issue1"), errors.New("r1_issue2")},
|
||||
"rule2": {errors.New("r2_issue1")},
|
||||
})
|
||||
assert.Equal(3, m.IssuesPerFile("file1"))
|
||||
assert.True(m.FileHasIssues("file1"))
|
||||
|
||||
// let report write to a buffer
|
||||
b := new(bytes.Buffer)
|
||||
m.Report(b)
|
||||
rep := b.String()
|
||||
assert.Equal(rep, `File file1 (3 issues total):
|
||||
Rule rule1 (2 issues total):
|
||||
r1_issue1
|
||||
r1_issue2
|
||||
Rule rule2 (1 issues total):
|
||||
r2_issue1
|
||||
`)
|
||||
}
|
30
hack/bazel-deps-mirror/internal/mirror/BUILD.bazel
Normal file
30
hack/bazel-deps-mirror/internal/mirror/BUILD.bazel
Normal file
|
@ -0,0 +1,30 @@
|
|||
# Build targets for the mirror (CAS upload/download) package.
# NOTE(review): this file looks gazelle-generated — regenerate rather than
# hand-editing srcs/deps; confirm with the repo's gazelle setup.
load("@io_bazel_rules_go//go:def.bzl", "go_library")
load("//bazel/go:go_test.bzl", "go_test")

go_library(
    name = "mirror",
    srcs = ["mirror.go"],
    importpath = "github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/mirror",
    visibility = ["//hack/bazel-deps-mirror:__subpackages__"],
    deps = [
        "//internal/logger",
        "@com_github_aws_aws_sdk_go_v2_config//:config",
        "@com_github_aws_aws_sdk_go_v2_feature_s3_manager//:manager",
        "@com_github_aws_aws_sdk_go_v2_service_s3//:s3",
        "@com_github_aws_aws_sdk_go_v2_service_s3//types",
    ],
)

go_test(
    name = "mirror_test",
    srcs = ["mirror_test.go"],
    embed = [":mirror"],
    deps = [
        "//internal/logger",
        "@com_github_aws_aws_sdk_go_v2_feature_s3_manager//:manager",
        "@com_github_aws_aws_sdk_go_v2_service_s3//:s3",
        "@com_github_aws_aws_sdk_go_v2_service_s3//types",
        "@com_github_stretchr_testify//assert",
        "@org_uber_go_goleak//:goleak",
    ],
)
|
270
hack/bazel-deps-mirror/internal/mirror/mirror.go
Normal file
270
hack/bazel-deps-mirror/internal/mirror/mirror.go
Normal file
|
@ -0,0 +1,270 @@
|
|||
/*
|
||||
Copyright (c) Edgeless Systems GmbH
|
||||
|
||||
SPDX-License-Identifier: AGPL-3.0-only
|
||||
*/
|
||||
|
||||
// Package mirror is used to upload and download Bazel dependencies to and from a mirror.
|
||||
package mirror
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
|
||||
awsconfig "github.com/aws/aws-sdk-go-v2/config"
|
||||
s3manager "github.com/aws/aws-sdk-go-v2/feature/s3/manager"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3"
|
||||
s3types "github.com/aws/aws-sdk-go-v2/service/s3/types"
|
||||
"github.com/edgelesssys/constellation/v2/internal/logger"
|
||||
)
|
||||
|
||||
// Maintainer can upload and download files to and from a CAS mirror.
type Maintainer struct {
	// objectStorageClient reads S3 object metadata (used by checkAuthenticated).
	objectStorageClient objectStorageClient
	// uploadClient streams objects into the bucket (used by put).
	uploadClient uploadClient
	// httpClient performs plain HTTP downloads from upstream urls and the public mirror.
	httpClient httpClient
	// bucket is the name of the S3 bucket to use.
	bucket string
	// mirrorBaseURL is the base URL of the public CAS http endpoint.
	mirrorBaseURL string

	// unauthenticated restricts the Maintainer to download/check via the public
	// http endpoint; uploads return an error.
	unauthenticated bool
	// dryRun logs would-be uploads instead of performing them.
	dryRun bool

	log *logger.Logger
}
|
||||
|
||||
// NewUnauthenticated creates a new Maintainer that does not require authentication
// and can only download and check files on a CAS mirror (no uploads).
func NewUnauthenticated(mirrorBaseURL string, dryRun bool, log *logger.Logger) *Maintainer {
	return &Maintainer{
		httpClient:      http.DefaultClient,
		mirrorBaseURL:   mirrorBaseURL,
		unauthenticated: true,
		dryRun:          dryRun,
		log:             log,
	}
}
|
||||
|
||||
// New creates a new Maintainer that can upload and download files to and from a CAS mirror.
|
||||
func New(ctx context.Context, region, bucket, mirrorBaseURL string, dryRun bool, log *logger.Logger) (*Maintainer, error) {
|
||||
cfg, err := awsconfig.LoadDefaultConfig(ctx, awsconfig.WithRegion(region))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s3C := s3.NewFromConfig(cfg)
|
||||
uploadC := s3manager.NewUploader(s3C)
|
||||
|
||||
return &Maintainer{
|
||||
objectStorageClient: s3C,
|
||||
uploadClient: uploadC,
|
||||
bucket: bucket,
|
||||
mirrorBaseURL: mirrorBaseURL,
|
||||
httpClient: http.DefaultClient,
|
||||
dryRun: dryRun,
|
||||
log: log,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// MirrorURL returns the public URL of a file in the CAS mirror.
|
||||
func (m *Maintainer) MirrorURL(hash string) (string, error) {
|
||||
if _, err := hex.DecodeString(hash); err != nil {
|
||||
return "", fmt.Errorf("invalid hash %q: %w", hash, err)
|
||||
}
|
||||
key := path.Join(keyBase, hash)
|
||||
pubURL, err := url.Parse(m.mirrorBaseURL)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
pubURL.Path = path.Join(pubURL.Path, key)
|
||||
return pubURL.String(), nil
|
||||
}
|
||||
|
||||
// Mirror downloads a file from one of the existing (non-mirror) urls and uploads it to the CAS mirror.
|
||||
// It also calculates the hash of the file during streaming and checks if it matches the expected hash.
|
||||
func (m *Maintainer) Mirror(ctx context.Context, hash string, urls []string) error {
|
||||
if m.unauthenticated {
|
||||
return errors.New("cannot upload in unauthenticated mode")
|
||||
}
|
||||
|
||||
for _, url := range urls {
|
||||
m.log.Debugf("Mirroring file with hash %v from %q", hash, url)
|
||||
body, err := m.downloadFromUpstream(ctx, url)
|
||||
if err != nil {
|
||||
m.log.Debugf("Failed to download file from %q: %v", url, err)
|
||||
continue
|
||||
}
|
||||
defer body.Close()
|
||||
streamedHash := sha256.New()
|
||||
tee := io.TeeReader(body, streamedHash)
|
||||
if err := m.put(ctx, hash, tee); err != nil {
|
||||
m.log.Warnf("Failed to stream file from upstream %q to mirror: %v.. Trying next url.", url, err)
|
||||
continue
|
||||
}
|
||||
actualHash := hex.EncodeToString(streamedHash.Sum(nil))
|
||||
|
||||
if actualHash != hash {
|
||||
return fmt.Errorf("hash mismatch while streaming file to mirror: expected %v, got %v", hash, actualHash)
|
||||
}
|
||||
pubURL, err := m.MirrorURL(hash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m.log.Debugf("File uploaded successfully to mirror from %q as %q", url, pubURL)
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("failed to download / reupload file with hash %v from any of the urls: %v", hash, urls)
|
||||
}
|
||||
|
||||
// Check checks if a file is present and has the correct hash in the CAS mirror.
|
||||
func (m *Maintainer) Check(ctx context.Context, expectedHash string) error {
|
||||
m.log.Debugf("Checking consistency of object with hash %v", expectedHash)
|
||||
if m.unauthenticated {
|
||||
return m.checkUnauthenticated(ctx, expectedHash)
|
||||
}
|
||||
return m.checkAuthenticated(ctx, expectedHash)
|
||||
}
|
||||
|
||||
// checkAuthenticated checks if a file is present and has the correct hash in the CAS mirror.
// It uses the authenticated CAS s3 endpoint to read the object's checksum metadata,
// falling back to a full download check when the metadata cannot prove the hash.
// (The comment previously named the wrong function, "checkReadonly".)
func (m *Maintainer) checkAuthenticated(ctx context.Context, expectedHash string) error {
	key := path.Join(keyBase, expectedHash)
	m.log.Debugf("Check: s3 getObjectAttributes {Bucket: %v, Key: %v}", m.bucket, key)
	attributes, err := m.objectStorageClient.GetObjectAttributes(ctx, &s3.GetObjectAttributesInput{
		Bucket:           &m.bucket,
		Key:              &key,
		ObjectAttributes: []s3types.ObjectAttributes{s3types.ObjectAttributesChecksum, s3types.ObjectAttributesObjectParts},
	})
	if err != nil {
		return err
	}

	hasChecksum := attributes.Checksum != nil && attributes.Checksum.ChecksumSHA256 != nil && len(*attributes.Checksum.ChecksumSHA256) > 0
	isSinglePart := attributes.ObjectParts == nil || attributes.ObjectParts.TotalPartsCount == 1

	if !hasChecksum || !isSinglePart {
		// checksums are not guaranteed to be present
		// and if present, they are only meaningful for single part objects
		// fallback if checksum cannot be verified from attributes
		m.log.Debugf("S3 object attributes cannot be used to verify key %v. Falling back to download.", key)
		return m.checkUnauthenticated(ctx, expectedHash)
	}

	// S3 returns the SHA256 checksum base64-encoded; decode to raw bytes for comparison.
	actualHash, err := base64.StdEncoding.DecodeString(*attributes.Checksum.ChecksumSHA256)
	if err != nil {
		return err
	}
	return compareHashes(expectedHash, actualHash)
}
|
||||
|
||||
// checkUnauthenticated checks if a file is present and has the correct hash in the CAS mirror.
// It uses the public CAS http endpoint to download the file and hashes the full body.
// (The comment previously named the wrong function, "checkReadonly".)
func (m *Maintainer) checkUnauthenticated(ctx context.Context, expectedHash string) error {
	pubURL, err := m.MirrorURL(expectedHash)
	if err != nil {
		return err
	}
	m.log.Debugf("Check: http get {Url: %v}", pubURL)
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, pubURL, http.NoBody)
	if err != nil {
		return err
	}
	resp, err := m.httpClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status code %v", resp.StatusCode)
	}

	// Stream the body through the hash; constant memory regardless of file size.
	actualHash := sha256.New()
	if _, err := io.Copy(actualHash, resp.Body); err != nil {
		return err
	}
	return compareHashes(expectedHash, actualHash.Sum(nil))
}
|
||||
|
||||
// put uploads a file to the CAS mirror.
|
||||
func (m *Maintainer) put(ctx context.Context, hash string, data io.Reader) error {
|
||||
if m.unauthenticated {
|
||||
return errors.New("cannot upload in unauthenticated mode")
|
||||
}
|
||||
|
||||
key := path.Join(keyBase, hash)
|
||||
if m.dryRun {
|
||||
m.log.Debugf("DryRun: s3 put object {Bucket: %v, Key: %v}", m.bucket, key)
|
||||
return nil
|
||||
}
|
||||
m.log.Debugf("Uploading object with hash %v to s3://%v/%v", hash, m.bucket, key)
|
||||
_, err := m.uploadClient.Upload(ctx, &s3.PutObjectInput{
|
||||
Bucket: &m.bucket,
|
||||
Key: &key,
|
||||
Body: data,
|
||||
ChecksumAlgorithm: s3types.ChecksumAlgorithmSha256,
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// downloadFromUpstream downloads a file from one of the existing (non-mirror) urls.
|
||||
func (m *Maintainer) downloadFromUpstream(ctx context.Context, url string) (body io.ReadCloser, retErr error) {
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, http.NoBody)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := m.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if retErr != nil {
|
||||
resp.Body.Close()
|
||||
}
|
||||
}()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, fmt.Errorf("unexpected status code %v", resp.StatusCode)
|
||||
}
|
||||
return resp.Body, nil
|
||||
}
|
||||
|
||||
// compareHashes verifies that actualHash (raw sha256 bytes) matches
// expectedHash (hex-encoded sha256 string). Both lengths are validated first
// so malformed inputs produce a specific error instead of a silent mismatch.
func compareHashes(expectedHash string, actualHash []byte) error {
	if len(actualHash) != sha256.Size {
		// Fix: message previously read "should to be".
		return fmt.Errorf("actual hash should be %v bytes, got %v", sha256.Size, len(actualHash))
	}
	if len(expectedHash) != hex.EncodedLen(sha256.Size) {
		return fmt.Errorf("expected hash should be %v bytes, got %v", hex.EncodedLen(sha256.Size), len(expectedHash))
	}
	actualHashStr := hex.EncodeToString(actualHash)
	if expectedHash != actualHashStr {
		return fmt.Errorf("expected hash %v, mirror returned %v", expectedHash, actualHashStr)
	}
	return nil
}
|
||||
|
||||
// objectStorageClient is the subset of the S3 API used to read object metadata.
type objectStorageClient interface {
	GetObjectAttributes(ctx context.Context, params *s3.GetObjectAttributesInput, optFns ...func(*s3.Options)) (*s3.GetObjectAttributesOutput, error)
}

// uploadClient is the subset of the s3manager API used to upload objects.
type uploadClient interface {
	Upload(ctx context.Context, input *s3.PutObjectInput, opts ...func(*s3manager.Uploader)) (*s3manager.UploadOutput, error)
}

// httpClient is the subset of *http.Client used by the Maintainer.
// NOTE(review): only Do is called in this file; confirm Get is needed before removing it.
type httpClient interface {
	Get(url string) (*http.Response, error)
	Do(req *http.Request) (*http.Response, error)
}
|
||||
|
||||
const (
	// DryRun is a flag to enable dry run mode.
	DryRun = true
	// Run is a flag to perform actual operations.
	Run = false
	// keyBase is the object-key prefix under which all mirrored files are
	// stored, addressed by their sha256 hex digest.
	keyBase = "constellation/cas/sha256"
)
|
285
hack/bazel-deps-mirror/internal/mirror/mirror_test.go
Normal file
285
hack/bazel-deps-mirror/internal/mirror/mirror_test.go
Normal file
|
@ -0,0 +1,285 @@
|
|||
/*
|
||||
Copyright (c) Edgeless Systems GmbH
|
||||
|
||||
SPDX-License-Identifier: AGPL-3.0-only
|
||||
*/
|
||||
|
||||
package mirror
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"testing"
|
||||
|
||||
s3manager "github.com/aws/aws-sdk-go-v2/feature/s3/manager"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3/types"
|
||||
"github.com/edgelesssys/constellation/v2/internal/logger"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"go.uber.org/goleak"
|
||||
)
|
||||
|
||||
// TestMain fails the package's tests if any test leaks a goroutine.
func TestMain(m *testing.M) {
	goleak.VerifyTestMain(m)
}
|
||||
|
||||
// TestMirrorURL checks that MirrorURL joins the mirror base URL with the
// hash-derived object key, and rejects non-hex hashes.
func TestMirrorURL(t *testing.T) {
	testCases := map[string]struct {
		hash    string
		wantURL string
		wantErr bool
	}{
		// "empty hash" is the sha256 digest of empty input (the hash string itself is not empty).
		"empty hash": {
			hash:    "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
			wantURL: "https://example.com/constellation/cas/sha256/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
		},
		"other hash": {
			hash:    "0000000000000000000000000000000000000000000000000000000000000000",
			wantURL: "https://example.com/constellation/cas/sha256/0000000000000000000000000000000000000000000000000000000000000000",
		},
		// Non-hex input must fail validation and yield an empty URL.
		"invalid hash": {
			hash:    "\x00",
			wantErr: true,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			m := Maintainer{
				mirrorBaseURL: "https://example.com/",
			}
			url, err := m.MirrorURL(tc.hash)
			if tc.wantErr {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)
			}
			assert.Equal(t, tc.wantURL, url)
		})
	}
}
|
||||
|
||||
// TestMirror drives Maintainer.Mirror against a stubbed upstream (http
// round-tripper) and a stubbed S3 uploader, covering the unauthenticated
// guard, download failures, hash mismatches, upload failures, and success.
func TestMirror(t *testing.T) {
	testCases := map[string]struct {
		unauthenticated bool
		hash            string
		data            []byte
		upstreamURL     string
		statusCode      int
		failUpload      bool
		wantErr         bool
	}{
		"cannot upload in unauthenticated mode": {
			unauthenticated: true,
			hash:            "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
			data:            []byte(""),
			upstreamURL:     "https://example.com/empty",
			statusCode:      http.StatusOK,
			wantErr:         true,
		},
		// Upstream responds non-200: download is skipped and Mirror fails.
		"http error": {
			hash:        "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
			data:        []byte(""),
			upstreamURL: "https://example.com/empty",
			statusCode:  http.StatusNotFound,
			wantErr:     true,
		},
		// Body does not hash to the expected digest.
		"hash mismatch": {
			hash:        "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
			data:        []byte("evil"),
			upstreamURL: "https://example.com/empty",
			statusCode:  http.StatusOK,
			wantErr:     true,
		},
		"upload error": {
			hash:        "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
			data:        []byte(""),
			upstreamURL: "https://example.com/empty",
			statusCode:  http.StatusOK,
			failUpload:  true,
			wantErr:     true,
		},
		"success": {
			hash:        "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
			data:        []byte(""),
			upstreamURL: "https://example.com/empty",
			statusCode:  http.StatusOK,
		},
		"success with different hash": {
			hash:        "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae",
			data:        []byte("foo"),
			upstreamURL: "https://example.com/foo",
			statusCode:  http.StatusOK,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			m := Maintainer{
				// All HTTP traffic goes through the canned round-tripper.
				httpClient: &http.Client{
					Transport: &stubUpstream{
						statusCode: tc.statusCode,
						body:       tc.data,
					},
				},
				uploadClient: &stubUploadClient{
					uploadErr: func() error {
						if tc.failUpload {
							return assert.AnError
						}
						return nil
					}(),
				},
				unauthenticated: tc.unauthenticated,
				log:             logger.NewTest(t),
			}
			err := m.Mirror(context.Background(), tc.hash, []string{tc.upstreamURL})
			if tc.wantErr {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)
			}
		})
	}
}
|
||||
|
||||
// TestCheck drives Maintainer.Check in both modes: unauthenticated (download
// via stubbed http) and authenticated (S3 object attributes via a stub),
// including the metadata fallback to the download path.
// Mode selection: the Maintainer runs unauthenticated iff no authenticated
// response is configured for the case.
func TestCheck(t *testing.T) {
	testCases := map[string]struct {
		hash                      string
		unauthenticatedResponse   []byte
		unauthenticatedStatusCode int
		authenticatedResponse     *s3.GetObjectAttributesOutput
		authenticatedErr          error
		wantErr                   bool
	}{
		"unauthenticated mode, http error": {
			hash:                      "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae",
			unauthenticatedResponse:   []byte("foo"), // ignored
			unauthenticatedStatusCode: http.StatusNotFound,
			wantErr:                   true,
		},
		// Body present but hashes to a different digest than expected.
		"unauthenticated mode, hash mismatch": {
			hash:                      "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
			unauthenticatedResponse:   []byte("foo"),
			unauthenticatedStatusCode: http.StatusOK,
			wantErr:                   true,
		},
		"unauthenticated mode, success": {
			hash:                      "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae",
			unauthenticatedResponse:   []byte("foo"),
			unauthenticatedStatusCode: http.StatusOK,
		},
		"authenticated mode, get attributes fails": {
			hash:             "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae",
			authenticatedErr: assert.AnError,
			wantErr:          true,
		},
		// Metadata checksum decodes to a different digest than expected.
		"authenticated mode, hash mismatch": {
			hash: "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae",
			authenticatedResponse: &s3.GetObjectAttributesOutput{
				Checksum: &types.Checksum{
					ChecksumSHA256: toPtr("tcH7Lvxta0Z0wv3MSM4BtDo7fAN2PAwzVd4Ame4PjHM="),
				},
				ObjectParts: &types.GetObjectAttributesParts{
					TotalPartsCount: 1,
				},
			},
			wantErr: true,
		},
		"authenticated mode, success": {
			hash: "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae",
			authenticatedResponse: &s3.GetObjectAttributesOutput{
				Checksum: &types.Checksum{
					ChecksumSHA256: toPtr("LCa0a2j/xo/5m0U8HTBBNBNCLXBkg7+g+YpeiGJm564="),
				},
				ObjectParts: &types.GetObjectAttributesParts{
					TotalPartsCount: 1,
				},
			},
		},
		// Multipart object: metadata checksum is unusable, so Check falls back
		// to the unauthenticated download path, which succeeds here.
		"authenticated mode, fallback to unauthenticated": {
			hash: "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae",
			authenticatedResponse: &s3.GetObjectAttributesOutput{
				ObjectParts: &types.GetObjectAttributesParts{
					TotalPartsCount: 2,
				},
			},
			unauthenticatedResponse:   []byte("foo"),
			unauthenticatedStatusCode: http.StatusOK,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			m := Maintainer{
				unauthenticated: (tc.authenticatedResponse == nil),
				httpClient: &http.Client{
					Transport: &stubUpstream{
						statusCode: tc.unauthenticatedStatusCode,
						body:       tc.unauthenticatedResponse,
					},
				},
				objectStorageClient: &stubObjectStorageClient{
					response: tc.authenticatedResponse,
					err:      tc.authenticatedErr,
				},
				log: logger.NewTest(t),
			}
			err := m.Check(context.Background(), tc.hash)
			if tc.wantErr {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)
			}
		})
	}
}
|
||||
|
||||
// stubUpstream implements http.RoundTripper and returns a canned response.
|
||||
type stubUpstream struct {
|
||||
statusCode int
|
||||
body []byte
|
||||
}
|
||||
|
||||
func (s *stubUpstream) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
log.Printf("stubUpstream: %s %s -> %q\n", req.Method, req.URL, string(s.body))
|
||||
return &http.Response{
|
||||
StatusCode: s.statusCode,
|
||||
Body: io.NopCloser(bytes.NewReader(s.body)),
|
||||
}, nil
|
||||
}
|
||||
|
||||
type stubUploadClient struct {
|
||||
uploadErr error
|
||||
uploadedData []byte
|
||||
}
|
||||
|
||||
func (s *stubUploadClient) Upload(
|
||||
_ context.Context, input *s3.PutObjectInput,
|
||||
_ ...func(*s3manager.Uploader),
|
||||
) (*s3manager.UploadOutput, error) {
|
||||
var err error
|
||||
s.uploadedData, err = io.ReadAll(input.Body)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return nil, s.uploadErr
|
||||
}
|
||||
|
||||
// toPtr returns a pointer to a copy of v — handy for literal values in
// struct fields that take pointers.
func toPtr[T any](v T) *T {
	p := v
	return &p
}
|
||||
|
||||
// stubObjectStorageClient returns a canned GetObjectAttributes response and
// error, standing in for the authenticated S3 metadata endpoint in tests.
type stubObjectStorageClient struct {
	response *s3.GetObjectAttributesOutput
	err      error
}

// GetObjectAttributes ignores its arguments and returns the configured pair.
func (s *stubObjectStorageClient) GetObjectAttributes(
	_ context.Context, _ *s3.GetObjectAttributesInput, _ ...func(*s3.Options),
) (*s3.GetObjectAttributesOutput, error) {
	return s.response, s.err
}
|
25
hack/bazel-deps-mirror/internal/rules/BUILD.bazel
Normal file
25
hack/bazel-deps-mirror/internal/rules/BUILD.bazel
Normal file
|
@ -0,0 +1,25 @@
|
|||
# Build targets for the rules (Bazel rule inspection/rewriting) package.
# NOTE(review): this file looks gazelle-generated — regenerate rather than
# hand-editing srcs/deps; confirm with the repo's gazelle setup.
load("@io_bazel_rules_go//go:def.bzl", "go_library")
load("//bazel/go:go_test.bzl", "go_test")

go_library(
    name = "rules",
    srcs = ["rules.go"],
    importpath = "github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/rules",
    visibility = ["//hack/bazel-deps-mirror:__subpackages__"],
    deps = [
        "@com_github_bazelbuild_buildtools//build",
        "@org_golang_x_exp//slices",
    ],
)

go_test(
    name = "rules_test",
    srcs = ["rules_test.go"],
    embed = [":rules"],
    deps = [
        "@com_github_bazelbuild_buildtools//build",
        "@com_github_stretchr_testify//assert",
        "@com_github_stretchr_testify//require",
        "@org_uber_go_goleak//:goleak",
    ],
)
|
297
hack/bazel-deps-mirror/internal/rules/rules.go
Normal file
297
hack/bazel-deps-mirror/internal/rules/rules.go
Normal file
|
@ -0,0 +1,297 @@
|
|||
/*
|
||||
Copyright (c) Edgeless Systems GmbH
|
||||
|
||||
SPDX-License-Identifier: AGPL-3.0-only
|
||||
*/
|
||||
|
||||
// Package rules is used to find and modify Bazel rules in WORKSPACE and bzl files.
|
||||
package rules
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/bazelbuild/buildtools/build"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
// Rules is used to find and modify Bazel rules of a set of rule kinds in WORKSPACE and .bzl files.
|
||||
// Filter is a list of rule kinds to consider.
|
||||
// If filter is empty, all rules are considered.
|
||||
func Rules(file *build.File, filter []string) (rules []*build.Rule) {
|
||||
allRules := file.Rules("")
|
||||
if len(filter) == 0 {
|
||||
return allRules
|
||||
}
|
||||
ruleLoop:
|
||||
for _, rule := range allRules {
|
||||
for _, ruleKind := range filter {
|
||||
if rule.Kind() == ruleKind {
|
||||
rules = append(rules, rule)
|
||||
continue ruleLoop
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ValidatePinned checks if the given rule is a pinned dependency rule.
// That is, if it has a name, either a url or urls attribute, and a sha256 attribute.
// All problems found are collected and returned rather than failing fast.
func ValidatePinned(rule *build.Rule) (validationErrs []error) {
	if rule.Name() == "" {
		validationErrs = append(validationErrs, errors.New("rule has no name"))
	}

	// Exactly one of "url" / "urls" must be present.
	hasURL := rule.Attr("url") != nil
	hasURLs := rule.Attr("urls") != nil
	if !hasURL && !hasURLs {
		validationErrs = append(validationErrs, errors.New("rule has no url or urls attribute"))
	}
	if hasURL && hasURLs {
		validationErrs = append(validationErrs, errors.New("rule has both url and urls attribute"))
	}
	if hasURL {
		url := rule.AttrString("url")
		if url == "" {
			validationErrs = append(validationErrs, errors.New("rule has empty url attribute"))
		}
	}
	if hasURLs {
		urls := rule.AttrStrings("urls")
		if len(urls) == 0 {
			validationErrs = append(validationErrs, errors.New("rule has empty urls list attribute"))
		} else {
			// Each entry of a non-empty list must itself be non-empty.
			for _, url := range urls {
				if url == "" {
					validationErrs = append(validationErrs, errors.New("rule has empty url in urls attribute"))
				}
			}
		}
	}
	// A pin without a hash is not a pin.
	if rule.Attr("sha256") == nil {
		validationErrs = append(validationErrs, errors.New("rule has no sha256 attribute"))
	} else {
		sha256 := rule.AttrString("sha256")
		if sha256 == "" {
			validationErrs = append(validationErrs, errors.New("rule has empty sha256 attribute"))
		}
	}
	return
}
|
||||
|
||||
// Check checks if a dependency rule is normalized and contains a mirror url.
// All errors reported by this function can be fixed by calling AddURLs and Normalize.
func Check(rule *build.Rule) (validationErrs []error) {
	// Normalized rules use only the plural "urls" attribute.
	hasURL := rule.Attr("url") != nil
	if hasURL {
		validationErrs = append(validationErrs, errors.New("rule has url (singular) attribute"))
	}
	urls := rule.AttrStrings("urls")
	// Compare against a sorted copy to detect out-of-order url lists.
	sorted := make([]string, len(urls))
	copy(sorted, urls)
	sortURLs(sorted)
	for i, url := range urls {
		if url != sorted[i] {
			validationErrs = append(validationErrs, errors.New("rule has unsorted urls attributes"))
			break
		}
	}
	if !HasMirrorURL(rule) {
		validationErrs = append(validationErrs, errors.New("rule is not mirrored"))
	}
	if rule.Kind() == "http_archive" && rule.Attr("type") == nil {
		validationErrs = append(validationErrs, errors.New("http_archive rule has no type attribute"))
	}
	// rpm rules must reference exactly one url (the stable edgeless mirror).
	if rule.Kind() == "rpm" && len(urls) != 1 {
		validationErrs = append(validationErrs, errors.New("rpm rule has unstable urls that are not the edgeless mirror"))
	}
	return
}
|
||||
|
||||
// Normalize normalizes a rule and returns true if the rule was changed.
|
||||
func Normalize(rule *build.Rule) (changed bool) {
|
||||
changed = addTypeAttribute(rule)
|
||||
urls := GetURLs(rule)
|
||||
normalizedURLS := append([]string{}, urls...)
|
||||
// rpm rules must have exactly one url (the edgeless mirror)
|
||||
if mirrorU, err := mirrorURL(rule); rule.Kind() == "rpm" && err == nil {
|
||||
normalizedURLS = []string{mirrorU}
|
||||
}
|
||||
sortURLs(normalizedURLS)
|
||||
normalizedURLS = deduplicateURLs(normalizedURLS)
|
||||
if slices.Equal(urls, normalizedURLS) && rule.Attr("url") == nil {
|
||||
return
|
||||
}
|
||||
setURLs(rule, normalizedURLS)
|
||||
changed = true
|
||||
return
|
||||
}
|
||||
|
||||
// AddURLs adds a url to a rule.
|
||||
func AddURLs(rule *build.Rule, urls []string) {
|
||||
existingURLs := GetURLs(rule)
|
||||
existingURLs = append(existingURLs, urls...)
|
||||
sortURLs(existingURLs)
|
||||
deduplicatedURLs := deduplicateURLs(existingURLs)
|
||||
setURLs(rule, deduplicatedURLs)
|
||||
}
|
||||
|
||||
// GetHash returns the sha256 hash of a rule.
|
||||
func GetHash(rule *build.Rule) (string, error) {
|
||||
hash := rule.AttrString("sha256")
|
||||
if hash == "" {
|
||||
return "", fmt.Errorf("rule %s has empty or missing sha256 attribute", rule.Name())
|
||||
}
|
||||
return hash, nil
|
||||
}
|
||||
|
||||
// GetURLs returns the urls of a rule, combining the "urls" list attribute and
// the singular "url" attribute (appended last, if set).
// Note: the result is in attribute order; this function does not sort.
func GetURLs(rule *build.Rule) []string {
	urls := rule.AttrStrings("urls")
	url := rule.AttrString("url")
	if url != "" {
		urls = append(urls, url)
	}
	return urls
}
|
||||
|
||||
// HasMirrorURL returns true if the rule has a url from the Edgeless mirror.
|
||||
func HasMirrorURL(rule *build.Rule) bool {
|
||||
_, err := mirrorURL(rule)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// deduplicateURLs removes duplicate urls, preserving the order of first
// occurrence.
func deduplicateURLs(urls []string) (deduplicated []string) {
	seen := make(map[string]struct{}, len(urls))
	for _, url := range urls {
		if _, ok := seen[url]; ok {
			continue
		}
		seen[url] = struct{}{}
		deduplicated = append(deduplicated, url)
	}
	return deduplicated
}
|
||||
|
||||
// addTypeAttribute adds the type attribute to http_archive rules if it is missing.
|
||||
// it returns true if the rule was changed.
|
||||
// it returns an error if the rule does not have enough information to add the type attribute.
|
||||
func addTypeAttribute(rule *build.Rule) bool {
|
||||
// only http_archive rules have a type attribute
|
||||
if rule.Kind() != "http_archive" {
|
||||
return false
|
||||
}
|
||||
if rule.Attr("type") != nil {
|
||||
return false
|
||||
}
|
||||
// iterate over all URLs and check if they have a known archive type
|
||||
var archiveType string
|
||||
urlLoop:
|
||||
for _, url := range GetURLs(rule) {
|
||||
switch {
|
||||
case strings.HasSuffix(url, ".aar"):
|
||||
archiveType = "aar"
|
||||
break urlLoop
|
||||
case strings.HasSuffix(url, ".ar"):
|
||||
archiveType = "ar"
|
||||
break urlLoop
|
||||
case strings.HasSuffix(url, ".deb"):
|
||||
archiveType = "deb"
|
||||
break urlLoop
|
||||
case strings.HasSuffix(url, ".jar"):
|
||||
archiveType = "jar"
|
||||
break urlLoop
|
||||
case strings.HasSuffix(url, ".tar.bz2"):
|
||||
archiveType = "tar.bz2"
|
||||
break urlLoop
|
||||
case strings.HasSuffix(url, ".tar.gz"):
|
||||
archiveType = "tar.gz"
|
||||
break urlLoop
|
||||
case strings.HasSuffix(url, ".tar.xz"):
|
||||
archiveType = "tar.xz"
|
||||
break urlLoop
|
||||
case strings.HasSuffix(url, ".tar.zst"):
|
||||
archiveType = "tar.zst"
|
||||
break urlLoop
|
||||
case strings.HasSuffix(url, ".tar"):
|
||||
archiveType = "tar"
|
||||
break urlLoop
|
||||
case strings.HasSuffix(url, ".tgz"):
|
||||
archiveType = "tgz"
|
||||
break urlLoop
|
||||
case strings.HasSuffix(url, ".txz"):
|
||||
archiveType = "txz"
|
||||
break urlLoop
|
||||
case strings.HasSuffix(url, ".tzst"):
|
||||
archiveType = "tzst"
|
||||
break urlLoop
|
||||
case strings.HasSuffix(url, ".war"):
|
||||
archiveType = "war"
|
||||
break urlLoop
|
||||
case strings.HasSuffix(url, ".zip"):
|
||||
archiveType = "zip"
|
||||
break urlLoop
|
||||
}
|
||||
}
|
||||
if archiveType == "" {
|
||||
return false
|
||||
}
|
||||
rule.SetAttr("type", &build.StringExpr{Value: archiveType})
|
||||
return true
|
||||
}
|
||||
|
||||
// mirrorURL returns the first mirror URL for a rule.
|
||||
func mirrorURL(rule *build.Rule) (string, error) {
|
||||
urls := GetURLs(rule)
|
||||
for _, url := range urls {
|
||||
if strings.HasPrefix(url, edgelessMirrorPrefix) {
|
||||
return url, nil
|
||||
}
|
||||
}
|
||||
return "", fmt.Errorf("rule %s has no mirror url", rule.Name())
|
||||
}
|
||||
|
||||
func setURLs(rule *build.Rule, urls []string) {
|
||||
// delete single url attribute if it exists
|
||||
rule.DelAttr("url")
|
||||
urlsAttr := []build.Expr{}
|
||||
for _, url := range urls {
|
||||
urlsAttr = append(urlsAttr, &build.StringExpr{Value: url})
|
||||
}
|
||||
rule.SetAttr("urls", &build.ListExpr{List: urlsAttr, ForceMultiLine: true})
|
||||
}
|
||||
|
||||
func sortURLs(urls []string) {
|
||||
// Bazel mirror should be first
|
||||
// edgeless mirror should be second
|
||||
// other urls should be last
|
||||
// if there are multiple urls from the same mirror, they should be sorted alphabetically
|
||||
sort.Slice(urls, func(i, j int) bool {
|
||||
rank := func(url string) int {
|
||||
if strings.HasPrefix(url, bazelMirrorPrefix) {
|
||||
return 0
|
||||
}
|
||||
if strings.HasPrefix(url, edgelessMirrorPrefix) {
|
||||
return 1
|
||||
}
|
||||
return 2
|
||||
}
|
||||
if rank(urls[i]) != rank(urls[j]) {
|
||||
return rank(urls[i]) < rank(urls[j])
|
||||
}
|
||||
return urls[i] < urls[j]
|
||||
})
|
||||
}
|
||||
|
||||
// SupportedRules is a list of all rule kinds that can be mirrored,
// i.e. rules that download a file addressed by url(s) and pinned by sha256.
var SupportedRules = []string{
	"http_archive",
	"http_file",
	"rpm",
}
|
||||
|
||||
const (
	// bazelMirrorPrefix is the url prefix of the public Bazel mirror.
	bazelMirrorPrefix = "https://mirror.bazel.build/"
	// edgelessMirrorPrefix is the url prefix of the Edgeless Systems mirror;
	// files are content-addressed by their sha256 hash ("cas/sha256/<hash>").
	edgelessMirrorPrefix = "https://cdn.confidential.cloud/constellation/cas/sha256/"
)
|
450
hack/bazel-deps-mirror/internal/rules/rules_test.go
Normal file
450
hack/bazel-deps-mirror/internal/rules/rules_test.go
Normal file
|
@ -0,0 +1,450 @@
|
|||
/*
|
||||
Copyright (c) Edgeless Systems GmbH
|
||||
|
||||
SPDX-License-Identifier: AGPL-3.0-only
|
||||
*/
|
||||
|
||||
package rules
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/bazelbuild/buildtools/build"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/goleak"
|
||||
)
|
||||
|
||||
// TestMain runs the package tests and verifies that they leak no goroutines.
func TestMain(m *testing.M) {
	goleak.VerifyTestMain(m)
}
|
||||
|
||||
// TestRules checks that Rules returns only rules of the requested kinds,
// and every rule when no kind filter is given.
func TestRules(t *testing.T) {
	assert := assert.New(t)
	// Fixture with three mirrorable rules and one git_repository rule that
	// is not a supported kind.
	const bzlFileContents = `
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_file")
load("@bazeldnf//:deps.bzl", "rpm")

http_archive(
    name = "foo_archive",
    sha256 = "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae",
    urls = ["https://example.com/foo.tar.gz"],
)

http_file(
    name = "bar_file",
    sha256 = "fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9",
    urls = ["https://example.com/bar"],
)

rpm(
    name = "baz_rpm",
    sha256 = "9e7ab438597fee20e16e8e441bed0ce966bd59e0fb993fa7c94be31fb1384d88",
    urls = ["https://example.com/baz.rpm"],
)

git_repository(
    name = "qux_git",
    remote = "https://example.com/qux.git",
    commit = "1234567890abcdef",
)
`
	bf, err := build.Parse("foo.bzl", []byte(bzlFileContents))
	if err != nil {
		t.Fatal(err)
	}

	// Filtering by SupportedRules drops the git_repository rule.
	rules := Rules(bf, SupportedRules)
	assert.Len(rules, 3)
	expectedNames := []string{"foo_archive", "bar_file", "baz_rpm"}
	for i, rule := range rules {
		assert.Equal(expectedNames[i], rule.Name())
	}

	// A nil filter returns every rule, including unsupported kinds.
	allRules := Rules(bf, nil)
	assert.Len(allRules, 4)
	expectedNames = []string{"foo_archive", "bar_file", "baz_rpm", "qux_git"}
	for i, rule := range allRules {
		assert.Equal(expectedNames[i], rule.Name())
	}
}
|
||||
|
||||
// TestValidatePinned checks that ValidatePinned reports one issue per
// missing, empty, or conflicting pinning attribute (name, sha256, url/urls).
func TestValidatePinned(t *testing.T) {
	testCases := map[string]struct {
		rule               string // Bazel rule under test
		expectedIssueCount int    // number of issues ValidatePinned must report
	}{
		"no issues, singular url": {
			rule: `
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")

http_archive(
    name = "foo_archive",
    sha256 = "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae",
    url = "https://example.com/foo.tar.gz",
)
`,
			expectedIssueCount: 0,
		},
		"no issues, url list": {
			rule: `
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")

http_archive(
    name = "foo_archive",
    sha256 = "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae",
    urls = ["https://example.com/foo.tar.gz"],
)
`,
			expectedIssueCount: 0,
		},
		"no issues, url list with multiple urls": {
			rule: `
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")

http_archive(
    name = "foo_archive",
    sha256 = "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae",
    urls = ["https://example.com/foo.tar.gz", "https://example.com/foo2.tar.gz"],
)
`,
			expectedIssueCount: 0,
		},
		"missing name": {
			rule: `
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")

http_archive(
    sha256 = "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae",
    url = "https://example.com/foo.tar.gz",
)
`,
			expectedIssueCount: 1,
		},
		"missing sha256 attribute": {
			rule: `
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")

http_archive(
    name = "foo_archive",
    url = "https://example.com/foo.tar.gz",
)
`,
			expectedIssueCount: 1,
		},
		"missing url attribute": {
			rule: `
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")

http_archive(
    name = "foo_archive",
    sha256 = "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae",
)
`,
			expectedIssueCount: 1,
		},
		"url and urls attribute given": {
			rule: `
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")

http_archive(
    name = "foo_archive",
    sha256 = "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae",
    url = "https://example.com/foo.tar.gz",
    urls = ["https://example.com/foo.tar.gz"],
)
`,
			expectedIssueCount: 1,
		},
		"empty url attribute": {
			rule: `
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")

http_archive(
    name = "foo_archive",
    sha256 = "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae",
    url = "",
)
`,
			expectedIssueCount: 1,
		},
		"empty urls attribute": {
			rule: `
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")

http_archive(
    name = "foo_archive",
    sha256 = "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae",
    urls = [],
)
`,
			expectedIssueCount: 1,
		},
		"empty url in urls attribute": {
			rule: `
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")

http_archive(
    name = "foo_archive",
    sha256 = "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae",
    urls = [""],
)
`,
			expectedIssueCount: 1,
		},
		"empty sha256 attribute": {
			rule: `
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")

http_archive(
    name = "foo_archive",
    sha256 = "",
    url = "https://example.com/foo.tar.gz",
)
`,
			expectedIssueCount: 1,
		},
		"missing all attributes": {
			rule: `
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")

http_archive(
    name = "foo_archive",
)
`,
			expectedIssueCount: 2,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)
			require := require.New(t)

			bf, err := build.Parse("foo.bzl", []byte(tc.rule))
			if err != nil {
				t.Fatal(err)
			}

			rules := Rules(bf, SupportedRules)
			require.Len(rules, 1)

			issues := ValidatePinned(rules[0])
			// a fully pinned rule must report no issues at all
			if tc.expectedIssueCount == 0 {
				assert.Nil(issues)
				return
			}
			assert.Len(issues, tc.expectedIssueCount)
		})
	}
}
|
||||
|
||||
func TestCheckNormalize(t *testing.T) {
|
||||
testCases := map[string]struct {
|
||||
rule string
|
||||
expectedIssueCount int
|
||||
cannotFix bool
|
||||
}{
|
||||
"rule with single url": {
|
||||
rule: `
|
||||
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
|
||||
|
||||
http_archive(
|
||||
name = "foo_archive",
|
||||
sha256 = "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae",
|
||||
url = "https://cdn.confidential.cloud/constellation/cas/sha256/2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae",
|
||||
type = "tar.gz",
|
||||
)
|
||||
`,
|
||||
expectedIssueCount: 1,
|
||||
},
|
||||
"rule with unsorted urls": {
|
||||
rule: `
|
||||
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
|
||||
|
||||
http_archive(
|
||||
name = "foo_archive",
|
||||
sha256 = "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae",
|
||||
urls = [
|
||||
"https://example.com/a/foo.tar.gz",
|
||||
"https://example.com/b/foo.tar.gz",
|
||||
"https://cdn.confidential.cloud/constellation/cas/sha256/2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae",
|
||||
"https://mirror.bazel.build/example.com/a/foo.tar.gz",
|
||||
],
|
||||
type = "tar.gz",
|
||||
)
|
||||
`,
|
||||
expectedIssueCount: 1,
|
||||
},
|
||||
"rule that is not mirrored": {
|
||||
rule: `
|
||||
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
|
||||
|
||||
http_archive(
|
||||
name = "foo_archive",
|
||||
sha256 = "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae",
|
||||
urls = ["https://example.com/foo.tar.gz"],
|
||||
type = "tar.gz",
|
||||
)
|
||||
`,
|
||||
expectedIssueCount: 1,
|
||||
cannotFix: true,
|
||||
},
|
||||
"http_archive with no type": {
|
||||
rule: `
|
||||
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
|
||||
|
||||
http_archive(
|
||||
name = "foo_archive",
|
||||
sha256 = "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae",
|
||||
urls = [
|
||||
"https://cdn.confidential.cloud/constellation/cas/sha256/2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae",
|
||||
"https://example.com/foo.tar.gz",
|
||||
],
|
||||
)
|
||||
`,
|
||||
expectedIssueCount: 1,
|
||||
},
|
||||
"rpm rule with urls that are not the mirror": {
|
||||
rule: `
|
||||
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
|
||||
|
||||
rpm(
|
||||
name = "foo_rpm",
|
||||
sha256 = "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae",
|
||||
urls = [
|
||||
"https://cdn.confidential.cloud/constellation/cas/sha256/2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae",
|
||||
"https://example.com/foo.rpm",
|
||||
],
|
||||
)
|
||||
`,
|
||||
expectedIssueCount: 1,
|
||||
},
|
||||
"http_archive rule that is correct": {
|
||||
rule: `
|
||||
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
|
||||
|
||||
http_archive(
|
||||
name = "foo_archive",
|
||||
sha256 = "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae",
|
||||
urls = ["https://cdn.confidential.cloud/constellation/cas/sha256/2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"],
|
||||
type = "tar.gz",
|
||||
)
|
||||
`,
|
||||
expectedIssueCount: 0,
|
||||
},
|
||||
"rpm rule that is correct": {
|
||||
rule: `
|
||||
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
|
||||
|
||||
rpm(
|
||||
name = "foo_rpm",
|
||||
sha256 = "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae",
|
||||
urls = ["https://cdn.confidential.cloud/constellation/cas/sha256/2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"],
|
||||
)
|
||||
`,
|
||||
expectedIssueCount: 0,
|
||||
},
|
||||
"http_file rule that is correct": {
|
||||
rule: `
|
||||
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_file")
|
||||
|
||||
http_file(
|
||||
name = "foo_file",
|
||||
sha256 = "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae",
|
||||
urls = ["https://cdn.confidential.cloud/constellation/cas/sha256/2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"],
|
||||
)
|
||||
`,
|
||||
expectedIssueCount: 0,
|
||||
},
|
||||
}
|
||||
|
||||
for name, tc := range testCases {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
require := require.New(t)
|
||||
|
||||
bf, err := build.Parse("foo.bzl", []byte(tc.rule))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
rules := Rules(bf, SupportedRules)
|
||||
require.Len(rules, 1)
|
||||
|
||||
issues := Check(rules[0])
|
||||
if tc.expectedIssueCount == 0 {
|
||||
assert.Nil(issues)
|
||||
return
|
||||
}
|
||||
assert.Equal(len(issues), tc.expectedIssueCount)
|
||||
|
||||
changed := Normalize(rules[0])
|
||||
if tc.expectedIssueCount > 0 && !tc.cannotFix {
|
||||
assert.True(changed)
|
||||
} else {
|
||||
assert.False(changed)
|
||||
}
|
||||
if tc.cannotFix {
|
||||
assert.NotNil(Check(rules[0]))
|
||||
} else {
|
||||
assert.Nil(Check(rules[0]))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddURLs(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
require := require.New(t)
|
||||
|
||||
rule := `
|
||||
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
|
||||
|
||||
http_archive(
|
||||
name = "foo_archive",
|
||||
sha256 = "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae",
|
||||
)
|
||||
`
|
||||
bf, err := build.Parse("foo.bzl", []byte(rule))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
rules := Rules(bf, SupportedRules)
|
||||
require.Len(rules, 1)
|
||||
|
||||
AddURLs(rules[0], []string{"https://example.com/a", "https://example.com/b"})
|
||||
assert.Equal([]string{"https://example.com/a", "https://example.com/b"}, GetURLs(rules[0]))
|
||||
}
|
||||
|
||||
func TestGetHash(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
require := require.New(t)
|
||||
|
||||
rule := `
|
||||
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
|
||||
|
||||
http_archive(
|
||||
name = "foo_archive",
|
||||
sha256 = "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae",
|
||||
)
|
||||
|
||||
http_archive(
|
||||
name = "bar_archive",
|
||||
)
|
||||
`
|
||||
bf, err := build.Parse("foo.bzl", []byte(rule))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
rules := Rules(bf, SupportedRules)
|
||||
require.Len(rules, 2)
|
||||
|
||||
hash, err := GetHash(rules[0])
|
||||
assert.NoError(err)
|
||||
assert.Equal("2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae", hash)
|
||||
|
||||
_, err = GetHash(rules[1])
|
||||
assert.Error(err)
|
||||
}
|
15
hack/go.mod
15
hack/go.mod
|
@ -37,11 +37,19 @@ replace (
|
|||
)
|
||||
|
||||
require (
|
||||
github.com/aws/aws-sdk-go-v2/config v1.18.19
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.59
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.31.0
|
||||
github.com/bazelbuild/buildtools v0.0.0-20230317132445-9c3c1fc0106e
|
||||
github.com/edgelesssys/constellation/v2 v2.6.0
|
||||
github.com/go-git/go-git/v5 v5.5.2
|
||||
github.com/hexops/gotextdiff v1.0.3
|
||||
github.com/spf13/afero v1.9.5
|
||||
github.com/spf13/cobra v1.6.1
|
||||
github.com/stretchr/testify v1.8.2
|
||||
go.uber.org/goleak v1.2.1
|
||||
go.uber.org/zap v1.24.0
|
||||
golang.org/x/exp v0.0.0-20220823124025-807a23277127
|
||||
golang.org/x/mod v0.8.0
|
||||
gopkg.in/square/go-jose.v2 v2.6.0
|
||||
libvirt.org/go/libvirt v1.8010.0
|
||||
|
@ -80,10 +88,8 @@ require (
|
|||
github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.17.7 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/config v1.18.19 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.13.18 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.59 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.31 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.25 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.32 // indirect
|
||||
|
@ -94,7 +100,6 @@ require (
|
|||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.26 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.25 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.31.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.12.6 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.6 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.18.7 // indirect
|
||||
|
@ -239,14 +244,13 @@ require (
|
|||
github.com/sassoftware/relic v0.0.0-20210427151427-dfb082b79b74 // indirect
|
||||
github.com/schollz/progressbar/v3 v3.13.1 // indirect
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.5.0 // indirect
|
||||
github.com/sergi/go-diff v1.2.0 // indirect
|
||||
github.com/sergi/go-diff v1.3.1 // indirect
|
||||
github.com/shopspring/decimal v1.3.1 // indirect
|
||||
github.com/siderolabs/talos/pkg/machinery v1.3.2 // indirect
|
||||
github.com/sigstore/rekor v1.0.1 // indirect
|
||||
github.com/sigstore/sigstore v1.6.0 // indirect
|
||||
github.com/sirupsen/logrus v1.9.0 // indirect
|
||||
github.com/skeema/knownhosts v1.1.0 // indirect
|
||||
github.com/spf13/afero v1.9.5 // indirect
|
||||
github.com/spf13/cast v1.5.0 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/tent/canonical-json-go v0.0.0-20130607151641-96e4ba3a7613 // indirect
|
||||
|
@ -265,7 +269,6 @@ require (
|
|||
go.uber.org/atomic v1.10.0 // indirect
|
||||
go.uber.org/multierr v1.9.0 // indirect
|
||||
golang.org/x/crypto v0.6.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20220823124025-807a23277127 // indirect
|
||||
golang.org/x/net v0.8.0 // indirect
|
||||
golang.org/x/oauth2 v0.6.0 // indirect
|
||||
golang.org/x/sync v0.1.0 // indirect
|
||||
|
|
|
@ -257,6 +257,8 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.18.7/go.mod h1:JuTnSoeePXmMVe9G8Ncjj
|
|||
github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8=
|
||||
github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
|
||||
github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I=
|
||||
github.com/bazelbuild/buildtools v0.0.0-20230317132445-9c3c1fc0106e h1:XmPu4mXICgdGnC5dXGjUGbwUD/kUmS0l5Aop3LaevBM=
|
||||
github.com/bazelbuild/buildtools v0.0.0-20230317132445-9c3c1fc0106e/go.mod h1:689QdV3hBP7Vo9dJMmzhoYIyo/9iMhEmHkJcnaPRCbo=
|
||||
github.com/beevik/etree v1.1.0/go.mod h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A=
|
||||
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
|
||||
github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
|
||||
|
@ -809,6 +811,8 @@ github.com/hashicorp/terraform-exec v0.17.3 h1:MX14Kvnka/oWGmIkyuyvL6POx25ZmKrjl
|
|||
github.com/hashicorp/terraform-exec v0.17.3/go.mod h1:+NELG0EqQekJzhvikkeQsOAZpsw0cv/03rbeQJqscAI=
|
||||
github.com/hashicorp/terraform-json v0.14.0 h1:sh9iZ1Y8IFJLx+xQiKHGud6/TSUCM0N8e17dKDpqV7s=
|
||||
github.com/hashicorp/terraform-json v0.14.0/go.mod h1:5A9HIWPkk4e5aeeXIBbkcOvaZbIYnAIkEyqP2pNSckM=
|
||||
github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
|
||||
github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
|
||||
github.com/honeycombio/beeline-go v1.10.0 h1:cUDe555oqvw8oD76BQJ8alk7FP0JZ/M/zXpNvOEDLDc=
|
||||
github.com/honeycombio/libhoney-go v1.16.0 h1:kPpqoz6vbOzgp7jC6SR7SkNj7rua7rgxvznI6M3KdHc=
|
||||
github.com/howeyc/gopass v0.0.0-20190910152052-7cb4b85ec19c/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs=
|
||||
|
@ -1208,8 +1212,9 @@ github.com/secure-systems-lab/go-securesystemslib v0.5.0 h1:oTiNu0QnulMQgN/hLK12
|
|||
github.com/secure-systems-lab/go-securesystemslib v0.5.0/go.mod h1:uoCqUC0Ap7jrBSEanxT+SdACYJTVplRXWLkGMuDjXqk=
|
||||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
|
||||
github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
|
||||
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
|
||||
github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
|
||||
github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I=
|
||||
github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
|
||||
github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
|
||||
github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
|
||||
|
@ -1424,6 +1429,7 @@ go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi
|
|||
go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE=
|
||||
go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
|
||||
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
|
||||
go.starlark.net v0.0.0-20210223155950-e043a3d3c984/go.mod h1:t3mmBBPzAVvK0L0n1drDmrQsJ8FoIx4INCqVMTr/Zo0=
|
||||
go.starlark.net v0.0.0-20220223235035-243c74974e97 h1:ghIB+2LQvihWROIGpcAVPq/ce5O2uMQersgxXiOeTS4=
|
||||
go.starlark.net v0.0.0-20220223235035-243c74974e97/go.mod h1:t3mmBBPzAVvK0L0n1drDmrQsJ8FoIx4INCqVMTr/Zo0=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
|
@ -1435,6 +1441,7 @@ go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
|
|||
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
|
||||
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
|
||||
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
|
||||
go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
|
||||
go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue