internal: refactor storage credentials (#1071)

* Move storage clients to separate packages

* Allow setting of client credentials for AWS S3 (see the sketch after this list)

* Use managed identity client secret or default credentials for Azure Blob Storage

* Use a credentials file to authorize the GCS client
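For illustration, a minimal sketch of the new AWS S3 call pattern, matching the uri.AWSS3Config and awss3.New usage visible in the diff below. All credential values are placeholders, and the idea that the Azure Blob and GCS packages expose analogous config-driven constructors is an assumption, not something this diff confirms:

// Sketch: wiring up the refactored AWS S3 storage client.
package main

import (
	"context"

	"github.com/edgelesssys/constellation/v2/internal/kms/storage/awss3"
	"github.com/edgelesssys/constellation/v2/internal/kms/uri"
)

func main() {
	ctx := context.Background()

	// Credentials are now passed explicitly through uri.AWSS3Config
	// instead of being picked up from the default AWS credential chain.
	cfg := uri.AWSS3Config{
		Bucket:      "example-bucket", // placeholder
		AccessKeyID: "AKIAEXAMPLE",    // placeholder
		AccessKey:   "example-secret", // placeholder
		Region:      "eu-central-1",   // placeholder
	}

	store, err := awss3.New(ctx, cfg)
	if err != nil {
		panic(err)
	}
	_ = store // store now serves Get/Put requests for DEKs.
}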

---------

Signed-off-by: Daniel Weiße <dw@edgeless.systems>
Daniel Weiße authored on 2023-03-02 15:08:31 +01:00, committed by GitHub
parent 96b4b74a7a
commit 5eb73706f5
30 changed files with 857 additions and 1130 deletions


@@ -11,7 +11,6 @@ package test
 import (
 	"context"
 	"flag"
-	"strings"
 	"testing"
 	"time"
@@ -19,9 +18,9 @@ import (
 	"github.com/aws/aws-sdk-go-v2/service/s3"
 	"github.com/aws/aws-sdk-go-v2/service/s3/types"
 	"github.com/edgelesssys/constellation/v2/internal/kms/kms/aws"
-	"github.com/edgelesssys/constellation/v2/internal/kms/storage"
+	"github.com/edgelesssys/constellation/v2/internal/kms/storage/awss3"
+	"github.com/edgelesssys/constellation/v2/internal/kms/storage/memfs"
 	"github.com/edgelesssys/constellation/v2/internal/kms/uri"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
@@ -29,53 +28,41 @@ func TestAwsStorage(t *testing.T) {
 	if !*runAwsStorage {
 		t.Skip("Skipping AWS storage test")
 	}
-	assert := assert.New(t)
+	if *awsAccessKey == "" || *awsAccessKeyID == "" || *awsBucket == "" || *awsRegion == "" {
+		flag.Usage()
+		t.Fatal("Required flags not set: --aws-access-key, --aws-access-key-id, --aws-bucket, --aws-region")
+	}
 	require := require.New(t)
 	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
 	defer cancel()
-	bucketID := strings.ToLower(addSuffix("test-bucket"))
-	// create bucket
-	store, err := storage.NewAWSS3Storage(ctx, bucketID, func(*s3.Options) {})
+	cfg := uri.AWSS3Config{
+		Bucket:      *awsBucket,
+		AccessKeyID: *awsAccessKeyID,
+		AccessKey:   *awsAccessKey,
+		Region:      *awsRegion,
+	}
+	store, err := awss3.New(ctx, cfg)
 	require.NoError(err)
-	testDEK1 := []byte("test DEK")
-	testDEK2 := []byte("more test DEK")
+	runStorageTest(t, store)
-	// request unset value
-	_, err = store.Get(ctx, "test:input")
-	assert.Error(err)
-	// test Put method
-	assert.NoError(store.Put(ctx, "volume01", testDEK1))
-	assert.NoError(store.Put(ctx, "volume02", testDEK2))
-	// make sure values have been set
-	val, err := store.Get(ctx, "volume01")
-	assert.NoError(err)
-	assert.Equal(testDEK1, val)
-	val, err = store.Get(ctx, "volume02")
-	assert.NoError(err)
-	assert.Equal(testDEK2, val)
-	_, err = store.Get(ctx, "invalid:key")
-	assert.Error(err)
-	assert.ErrorIs(err, storage.ErrDEKUnset)
-	cleanUpBucket(ctx, require, bucketID, func(*s3.Options) {})
+	cleanUpBucket(ctx, require, *awsBucket, *awsRegion)
 }
 
-func cleanUpBucket(ctx context.Context, require *require.Assertions, bucketID string, optFns ...func(*s3.Options)) {
-	cfg, err := config.LoadDefaultConfig(ctx)
+func cleanUpBucket(ctx context.Context, require *require.Assertions, bucketID, awsRegion string) {
+	cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion(awsRegion))
 	require.NoError(err)
-	client := s3.NewFromConfig(cfg, optFns...)
+	client := s3.NewFromConfig(cfg)
 	// List all objects of the bucket
 	listObjectsInput := &s3.ListObjectsV2Input{
 		Bucket: &bucketID,
 	}
-	output, _ := client.ListObjectsV2(ctx, listObjectsInput)
+	output, err := client.ListObjectsV2(ctx, listObjectsInput)
+	require.NoError(err)
 	var objects []string
 	var i int32
 	for i = 0; i < output.KeyCount; i++ {
@@ -117,7 +104,7 @@ func TestAwsKms(t *testing.T) {
 	require := require.New(t)
-	store := storage.NewMemMapStorage()
+	store := memfs.New()
 	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
 	defer cancel()
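
Note on runStorageTest: the inline Put/Get assertions deleted above were folded into a shared runStorageTest helper that this excerpt calls but does not show. The following is a reconstruction from the deleted assertions, assuming the same imports as the test file; the real helper's signature and body may differ:

// runStorageTest (reconstructed sketch): exercises the Put/Get round trip
// that the deleted inline assertions covered. storageClient is a local
// stand-in for whatever interface the real helper accepts.
type storageClient interface {
	Get(ctx context.Context, keyID string) ([]byte, error)
	Put(ctx context.Context, keyID string, data []byte) error
}

func runStorageTest(t *testing.T, store storageClient) {
	assert := assert.New(t)
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel()

	testDEK1 := []byte("test DEK")
	testDEK2 := []byte("more test DEK")

	// Write two DEKs and read them back.
	assert.NoError(store.Put(ctx, "volume01", testDEK1))
	assert.NoError(store.Put(ctx, "volume02", testDEK2))

	val, err := store.Get(ctx, "volume01")
	assert.NoError(err)
	assert.Equal(testDEK1, val)

	val, err = store.Get(ctx, "volume02")
	assert.NoError(err)
	assert.Equal(testDEK2, val)

	// Reading an unset key must fail with storage.ErrDEKUnset.
	_, err = store.Get(ctx, "invalid:key")
	assert.ErrorIs(err, storage.ErrDEKUnset)
}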