/*
Copyright (c) Edgeless Systems GmbH

SPDX-License-Identifier: AGPL-3.0-only
*/
package cmd
|
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
2022-06-28 11:03:28 -04:00
|
|
|
"context"
|
2023-12-05 06:28:40 -05:00
|
|
|
"crypto/sha256"
|
|
|
|
"crypto/sha512"
|
2022-03-22 11:03:15 -04:00
|
|
|
"encoding/base64"
|
2022-06-28 11:03:28 -04:00
|
|
|
"encoding/json"
|
2022-03-22 11:03:15 -04:00
|
|
|
"errors"
|
2022-06-28 11:03:28 -04:00
|
|
|
"net"
|
|
|
|
"strconv"
|
2023-04-03 09:06:27 -04:00
|
|
|
"strings"
|
2022-03-22 11:03:15 -04:00
|
|
|
"testing"
|
|
|
|
|
2022-09-21 07:47:57 -04:00
|
|
|
"github.com/edgelesssys/constellation/v2/internal/atls"
|
2023-04-03 09:06:27 -04:00
|
|
|
"github.com/edgelesssys/constellation/v2/internal/attestation/measurements"
|
2023-06-09 09:41:02 -04:00
|
|
|
"github.com/edgelesssys/constellation/v2/internal/attestation/variant"
|
2022-09-21 07:47:57 -04:00
|
|
|
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
|
|
|
|
"github.com/edgelesssys/constellation/v2/internal/config"
|
|
|
|
"github.com/edgelesssys/constellation/v2/internal/constants"
|
2023-12-08 10:27:04 -05:00
|
|
|
"github.com/edgelesssys/constellation/v2/internal/constellation/state"
|
2022-09-21 07:47:57 -04:00
|
|
|
"github.com/edgelesssys/constellation/v2/internal/file"
|
|
|
|
"github.com/edgelesssys/constellation/v2/internal/grpc/dialer"
|
|
|
|
"github.com/edgelesssys/constellation/v2/internal/grpc/testdialer"
|
2022-11-21 11:02:33 -05:00
|
|
|
"github.com/edgelesssys/constellation/v2/internal/logger"
|
2022-09-21 07:47:57 -04:00
|
|
|
"github.com/edgelesssys/constellation/v2/verify/verifyproto"
|
2023-09-08 02:08:09 -04:00
|
|
|
tpmProto "github.com/google/go-tpm-tools/proto/tpm"
|
2022-04-27 05:17:41 -04:00
|
|
|
"github.com/spf13/afero"
|
2022-03-22 11:03:15 -04:00
|
|
|
"github.com/stretchr/testify/assert"
|
|
|
|
"github.com/stretchr/testify/require"
|
2022-06-28 11:03:28 -04:00
|
|
|
"google.golang.org/grpc"
|
2022-03-22 11:03:15 -04:00
|
|
|
"google.golang.org/grpc/codes"
|
|
|
|
rpcStatus "google.golang.org/grpc/status"
|
|
|
|
)
|
|
|
|
|
2022-04-27 05:17:41 -04:00
|
|
|
// TestVerify exercises verifyCmd.verify with a table of flag and state-file
// combinations. It checks the error behavior and, on success, that the
// endpoint handed to the verify client matches the expected one and that
// the command reports "OK".
func TestVerify(t *testing.T) {
	zeroBase64 := base64.StdEncoding.EncodeToString([]byte("00000000000000000000000000000000"))
	someErr := errors.New("failed")

	testCases := map[string]struct {
		provider           cloudprovider.Provider
		protoClient        *stubVerifyClient
		formatter          *stubAttDocFormatter
		nodeEndpointFlag   string
		clusterIDFlag      string
		stateFile          *state.State
		wantEndpoint       string
		skipConfigCreation bool // when set, no config file is written before running verify
		wantErr            bool
	}{
		"gcp": {
			provider:         cloudprovider.GCP,
			nodeEndpointFlag: "192.0.2.1:1234",
			clusterIDFlag:    zeroBase64,
			protoClient:      &stubVerifyClient{},
			stateFile:        defaultStateFile(cloudprovider.GCP),
			wantEndpoint:     "192.0.2.1:1234",
			formatter:        &stubAttDocFormatter{},
		},
		"azure": {
			provider:         cloudprovider.Azure,
			nodeEndpointFlag: "192.0.2.1:1234",
			clusterIDFlag:    zeroBase64,
			protoClient:      &stubVerifyClient{},
			stateFile:        defaultStateFile(cloudprovider.Azure),
			wantEndpoint:     "192.0.2.1:1234",
			formatter:        &stubAttDocFormatter{},
		},
		"default port": {
			// Endpoint flag without a port: the default verify-service port is appended.
			provider:         cloudprovider.GCP,
			nodeEndpointFlag: "192.0.2.1",
			clusterIDFlag:    zeroBase64,
			protoClient:      &stubVerifyClient{},
			stateFile:        defaultStateFile(cloudprovider.GCP),
			wantEndpoint:     "192.0.2.1:" + strconv.Itoa(constants.VerifyServiceNodePortGRPC),
			formatter:        &stubAttDocFormatter{},
		},
		"endpoint not set": {
			// Neither flag nor state file provide an endpoint -> error.
			provider:      cloudprovider.GCP,
			clusterIDFlag: zeroBase64,
			protoClient:   &stubVerifyClient{},
			stateFile: func() *state.State {
				s := defaultStateFile(cloudprovider.GCP)
				s.Infrastructure.ClusterEndpoint = ""
				return s
			}(),
			formatter: &stubAttDocFormatter{},
			wantErr:   true,
		},
		"endpoint from state file": {
			// No endpoint flag: the endpoint is taken from the state file.
			provider:      cloudprovider.GCP,
			clusterIDFlag: zeroBase64,
			protoClient:   &stubVerifyClient{},
			stateFile: func() *state.State {
				s := defaultStateFile(cloudprovider.GCP)
				s.Infrastructure.ClusterEndpoint = "192.0.2.1"
				return s
			}(),
			wantEndpoint: "192.0.2.1:" + strconv.Itoa(constants.VerifyServiceNodePortGRPC),
			formatter:    &stubAttDocFormatter{},
		},
		"override endpoint from details file": {
			// The endpoint flag takes precedence over the state file.
			provider:         cloudprovider.GCP,
			nodeEndpointFlag: "192.0.2.2:1234",
			clusterIDFlag:    zeroBase64,
			protoClient:      &stubVerifyClient{},
			stateFile: func() *state.State {
				s := defaultStateFile(cloudprovider.GCP)
				s.Infrastructure.ClusterEndpoint = "192.0.2.1"
				return s
			}(),
			wantEndpoint: "192.0.2.2:1234",
			formatter:    &stubAttDocFormatter{},
		},
		"invalid endpoint": {
			provider:         cloudprovider.GCP,
			nodeEndpointFlag: ":::::",
			clusterIDFlag:    zeroBase64,
			protoClient:      &stubVerifyClient{},
			stateFile:        defaultStateFile(cloudprovider.GCP),
			formatter:        &stubAttDocFormatter{},
			wantErr:          true,
		},
		"neither owner id nor cluster id set": {
			provider:         cloudprovider.GCP,
			nodeEndpointFlag: "192.0.2.1:1234",
			stateFile: func() *state.State {
				s := defaultStateFile(cloudprovider.GCP)
				s.ClusterValues.OwnerID = ""
				s.ClusterValues.ClusterID = ""
				return s
			}(),
			formatter:   &stubAttDocFormatter{},
			protoClient: &stubVerifyClient{},
			wantErr:     true,
		},
		"use owner id from state file": {
			provider:         cloudprovider.GCP,
			nodeEndpointFlag: "192.0.2.1:1234",
			protoClient:      &stubVerifyClient{},
			stateFile: func() *state.State {
				s := defaultStateFile(cloudprovider.GCP)
				s.ClusterValues.OwnerID = zeroBase64
				return s
			}(),
			wantEndpoint: "192.0.2.1:1234",
			formatter:    &stubAttDocFormatter{},
		},
		"config file not existing": {
			provider:           cloudprovider.GCP,
			clusterIDFlag:      zeroBase64,
			nodeEndpointFlag:   "192.0.2.1:1234",
			stateFile:          defaultStateFile(cloudprovider.GCP),
			formatter:          &stubAttDocFormatter{},
			skipConfigCreation: true,
			wantErr:            true,
		},
		"error protoClient GetState": {
			provider:         cloudprovider.Azure,
			nodeEndpointFlag: "192.0.2.1:1234",
			clusterIDFlag:    zeroBase64,
			protoClient:      &stubVerifyClient{verifyErr: rpcStatus.Error(codes.Internal, "failed")},
			stateFile:        defaultStateFile(cloudprovider.Azure),
			formatter:        &stubAttDocFormatter{},
			wantErr:          true,
		},
		"error protoClient GetState not rpc": {
			provider:         cloudprovider.Azure,
			nodeEndpointFlag: "192.0.2.1:1234",
			clusterIDFlag:    zeroBase64,
			protoClient:      &stubVerifyClient{verifyErr: someErr},
			stateFile:        defaultStateFile(cloudprovider.Azure),
			formatter:        &stubAttDocFormatter{},
			wantErr:          true,
		},
		"format error": {
			provider:         cloudprovider.Azure,
			nodeEndpointFlag: "192.0.2.1:1234",
			clusterIDFlag:    zeroBase64,
			protoClient:      &stubVerifyClient{},
			stateFile:        defaultStateFile(cloudprovider.Azure),
			wantEndpoint:     "192.0.2.1:1234",
			formatter:        &stubAttDocFormatter{formatErr: someErr},
			wantErr:          true,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)
			require := require.New(t)

			cmd := NewVerifyCmd()
			out := &bytes.Buffer{}
			// The command writes its status output to stderr.
			cmd.SetErr(out)
			fileHandler := file.NewHandler(afero.NewMemMapFs())

			// Write config and state files into the in-memory FS before running.
			if !tc.skipConfigCreation {
				cfg := defaultConfigWithExpectedMeasurements(t, config.Default(), tc.provider)
				require.NoError(fileHandler.WriteYAML(constants.ConfigFilename, cfg))
			}
			require.NoError(tc.stateFile.WriteToFile(fileHandler, constants.StateFilename))

			v := &verifyCmd{
				fileHandler: fileHandler,
				log:         logger.NewTest(t),
				flags: verifyFlags{
					clusterID: tc.clusterIDFlag,
					endpoint:  tc.nodeEndpointFlag,
				},
			}
			// The factory always hands back the test's stub formatter.
			formatterFac := func(_ string, _ variant.Variant, _ debugLog) (attestationDocFormatter, error) {
				return tc.formatter, nil
			}
			err := v.verify(cmd, tc.protoClient, formatterFac, stubAttestationFetcher{})
			if tc.wantErr {
				assert.Error(err)
			} else {
				assert.NoError(err)
				assert.Contains(out.String(), "OK")
				// The stub client records the endpoint it was called with.
				assert.Equal(tc.wantEndpoint, tc.protoClient.endpoint)
			}
		})
	}
}
|
|
|
|
|
2023-04-03 09:06:27 -04:00
|
|
|
// stubAttDocFormatter is an attestationDocFormatter stub whose format method
// returns a configurable error.
type stubAttDocFormatter struct {
	formatErr error // returned by format
}
|
|
|
|
|
2023-11-07 06:17:08 -05:00
|
|
|
// format ignores all inputs and returns an empty string together with the
// configured formatErr.
func (f *stubAttDocFormatter) format(_ context.Context, _ string, _ bool, _ config.AttestationCfg) (string, error) {
	return "", f.formatErr
}
|
|
|
|
|
|
|
|
func TestFormat(t *testing.T) {
|
2023-10-07 10:24:29 -04:00
|
|
|
formatter := func() *defaultAttestationDocFormatter {
|
|
|
|
return &defaultAttestationDocFormatter{
|
2023-04-03 09:06:27 -04:00
|
|
|
log: logger.NewTest(t),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
testCases := map[string]struct {
|
2023-10-07 10:24:29 -04:00
|
|
|
formatter *defaultAttestationDocFormatter
|
2023-04-03 09:06:27 -04:00
|
|
|
doc string
|
|
|
|
wantErr bool
|
|
|
|
}{
|
|
|
|
"invalid doc": {
|
|
|
|
formatter: formatter(),
|
|
|
|
doc: "invalid",
|
|
|
|
wantErr: true,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
for name, tc := range testCases {
|
|
|
|
t.Run(name, func(t *testing.T) {
|
2023-11-07 06:17:08 -05:00
|
|
|
_, err := tc.formatter.format(context.Background(), tc.doc, false, nil)
|
2023-04-03 09:06:27 -04:00
|
|
|
if tc.wantErr {
|
|
|
|
assert.Error(t, err)
|
|
|
|
} else {
|
|
|
|
assert.NoError(t, err)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-06-28 11:03:28 -04:00
|
|
|
// TestVerifyClient runs constellationVerifier.Verify against an in-memory
// (bufconn) gRPC verify service and checks that attestation errors as well as
// mismatched user data or nonces are reported as errors.
func TestVerifyClient(t *testing.T) {
	testCases := map[string]struct {
		attestationDoc atls.FakeAttestationDoc // document the stub server returns
		nonce          []byte                  // nonce sent in the request
		attestationErr error                   // error returned by the stub server
		wantErr        bool
	}{
		"success": {
			attestationDoc: atls.FakeAttestationDoc{
				UserData: []byte(constants.ConstellationVerifyServiceUserData),
				Nonce:    []byte("nonce"),
			},
			nonce: []byte("nonce"),
		},
		"attestation error": {
			attestationDoc: atls.FakeAttestationDoc{
				UserData: []byte(constants.ConstellationVerifyServiceUserData),
				Nonce:    []byte("nonce"),
			},
			nonce:          []byte("nonce"),
			attestationErr: errors.New("error"),
			wantErr:        true,
		},
		"user data does not match": {
			attestationDoc: atls.FakeAttestationDoc{
				UserData: []byte("wrong user data"),
				Nonce:    []byte("nonce"),
			},
			nonce:   []byte("nonce"),
			wantErr: true,
		},
		"nonce does not match": {
			attestationDoc: atls.FakeAttestationDoc{
				UserData: []byte(constants.ConstellationVerifyServiceUserData),
				Nonce:    []byte("wrong nonce"),
			},
			nonce:   []byte("nonce"),
			wantErr: true,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)
			require := require.New(t)

			attestation, err := json.Marshal(tc.attestationDoc)
			require.NoError(err)
			verifyAPI := &stubVerifyAPI{
				attestation:    &verifyproto.GetAttestationResponse{Attestation: attestation},
				attestationErr: tc.attestationErr,
			}

			// Serve the stub verify API over an in-memory connection.
			netDialer := testdialer.NewBufconnDialer()
			dialer := dialer.New(nil, nil, netDialer)
			verifyServer := grpc.NewServer()
			verifyproto.RegisterAPIServer(verifyServer, verifyAPI)

			addr := net.JoinHostPort("192.0.2.1", strconv.Itoa(constants.VerifyServiceNodePortGRPC))
			listener := netDialer.GetListener(addr)
			go verifyServer.Serve(listener)
			defer verifyServer.GracefulStop()

			verifier := &constellationVerifier{dialer: dialer, log: logger.NewTest(t)}
			request := &verifyproto.GetAttestationRequest{
				Nonce: tc.nonce,
			}

			_, err = verifier.Verify(context.Background(), addr, request, atls.NewFakeValidator(variant.Dummy{}))

			if tc.wantErr {
				assert.Error(err)
			} else {
				assert.NoError(err)
			}
		})
	}
}
|
|
|
|
|
|
|
|
// stubVerifyClient is a verify-client stub that records the endpoint it was
// called with and returns a configurable error.
type stubVerifyClient struct {
	verifyErr error  // returned by Verify
	endpoint  string // endpoint passed to the last Verify call
}
|
|
|
|
|
2023-04-03 09:06:27 -04:00
|
|
|
// Verify records the requested endpoint and returns the configured verifyErr.
func (c *stubVerifyClient) Verify(_ context.Context, endpoint string, _ *verifyproto.GetAttestationRequest, _ atls.Validator) (string, error) {
	c.endpoint = endpoint
	return "", c.verifyErr
}
|
|
|
|
|
|
|
|
// stubVerifyAPI implements the verifyproto API server with canned responses.
type stubVerifyAPI struct {
	attestation    *verifyproto.GetAttestationResponse // response returned by GetAttestation
	attestationErr error                               // error returned by GetAttestation
	verifyproto.UnimplementedAPIServer
}
|
|
|
|
|
|
|
|
// GetAttestation returns the configured attestation response and error.
func (a stubVerifyAPI) GetAttestation(context.Context, *verifyproto.GetAttestationRequest) (*verifyproto.GetAttestationResponse, error) {
	return a.attestation, a.attestationErr
}
|
2022-07-29 02:24:13 -04:00
|
|
|
|
|
|
|
func TestAddPortIfMissing(t *testing.T) {
|
|
|
|
testCases := map[string]struct {
|
|
|
|
endpoint string
|
|
|
|
defaultPort int
|
|
|
|
wantResult string
|
|
|
|
wantErr bool
|
|
|
|
}{
|
|
|
|
"ip and port": {
|
|
|
|
endpoint: "192.0.2.1:2",
|
|
|
|
defaultPort: 3,
|
|
|
|
wantResult: "192.0.2.1:2",
|
|
|
|
},
|
|
|
|
"hostname and port": {
|
|
|
|
endpoint: "foo:2",
|
|
|
|
defaultPort: 3,
|
|
|
|
wantResult: "foo:2",
|
|
|
|
},
|
|
|
|
"ip": {
|
|
|
|
endpoint: "192.0.2.1",
|
|
|
|
defaultPort: 3,
|
|
|
|
wantResult: "192.0.2.1:3",
|
|
|
|
},
|
|
|
|
"hostname": {
|
|
|
|
endpoint: "foo",
|
|
|
|
defaultPort: 3,
|
|
|
|
wantResult: "foo:3",
|
|
|
|
},
|
|
|
|
"empty endpoint": {
|
|
|
|
endpoint: "",
|
|
|
|
defaultPort: 3,
|
|
|
|
wantErr: true,
|
|
|
|
},
|
|
|
|
"invalid endpoint": {
|
|
|
|
endpoint: "foo:2:2",
|
|
|
|
defaultPort: 3,
|
|
|
|
wantErr: true,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
for name, tc := range testCases {
|
|
|
|
t.Run(name, func(t *testing.T) {
|
|
|
|
assert := assert.New(t)
|
|
|
|
require := require.New(t)
|
|
|
|
|
|
|
|
res, err := addPortIfMissing(tc.endpoint, tc.defaultPort)
|
|
|
|
if tc.wantErr {
|
|
|
|
assert.Error(err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
require.NoError(err)
|
|
|
|
assert.Equal(tc.wantResult, res)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
2023-09-08 02:08:09 -04:00
|
|
|
|
|
|
|
// TestParseQuotes checks that parseQuotes renders the expected human-readable
// PCR comparison, ignores PCRs not listed in the expected measurements, and
// errors when an expected PCR is missing from the quote or no quotes exist.
func TestParseQuotes(t *testing.T) {
	testCases := map[string]struct {
		quotes       []*tpmProto.Quote
		expectedPCRs measurements.M
		wantOutput   string // exact formatted output, including tabs/newlines
		wantErr      bool
	}{
		"parse quotes in order": {
			quotes: []*tpmProto.Quote{
				{
					Pcrs: &tpmProto.PCRs{
						Hash: tpmProto.HashAlgo_SHA256,
						Pcrs: map[uint32][]byte{
							0: {0x00},
							1: {0x01},
						},
					},
				},
			},
			expectedPCRs: measurements.M{
				0: measurements.WithAllBytes(0x00, measurements.Enforce, 1),
				1: measurements.WithAllBytes(0x01, measurements.WarnOnly, 1),
			},
			wantOutput: "\tQuote:\n\t\tPCR 0 (Strict: true):\n\t\t\tExpected:\t00\n\t\t\tActual:\t\t00\n\t\tPCR 1 (Strict: false):\n\t\t\tExpected:\t01\n\t\t\tActual:\t\t01\n",
		},
		"additional quotes are skipped": {
			// PCRs 2 and 3 are present in the quote but not expected, so they
			// do not appear in the output.
			quotes: []*tpmProto.Quote{
				{
					Pcrs: &tpmProto.PCRs{
						Hash: tpmProto.HashAlgo_SHA256,
						Pcrs: map[uint32][]byte{
							0: {0x00},
							1: {0x01},
							2: {0x02},
							3: {0x03},
						},
					},
				},
			},
			expectedPCRs: measurements.M{
				0: measurements.WithAllBytes(0x00, measurements.Enforce, 1),
				1: measurements.WithAllBytes(0x01, measurements.WarnOnly, 1),
			},
			wantOutput: "\tQuote:\n\t\tPCR 0 (Strict: true):\n\t\t\tExpected:\t00\n\t\t\tActual:\t\t00\n\t\tPCR 1 (Strict: false):\n\t\t\tExpected:\t01\n\t\t\tActual:\t\t01\n",
		},
		"missing quotes error": {
			// Expected PCR 1 is absent from the quote.
			quotes: []*tpmProto.Quote{
				{
					Pcrs: &tpmProto.PCRs{
						Hash: tpmProto.HashAlgo_SHA256,
						Pcrs: map[uint32][]byte{
							0: {0x00},
						},
					},
				},
			},
			expectedPCRs: measurements.M{
				0: measurements.WithAllBytes(0x00, measurements.Enforce, 1),
				1: measurements.WithAllBytes(0x01, measurements.WarnOnly, 1),
			},
			wantErr: true,
		},
		"no quotes error": {
			quotes: []*tpmProto.Quote{},
			expectedPCRs: measurements.M{
				0: measurements.WithAllBytes(0x00, measurements.Enforce, 1),
				1: measurements.WithAllBytes(0x01, measurements.WarnOnly, 1),
			},
			wantErr: true,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)

			b := &strings.Builder{}
			parser := &defaultAttestationDocFormatter{}

			err := parser.parseQuotes(b, tc.quotes, tc.expectedPCRs)
			if tc.wantErr {
				assert.Error(err)
			} else {
				assert.NoError(err)
				assert.Equal(tc.wantOutput, b.String())
			}
		})
	}
}
|
2023-12-05 06:28:40 -05:00
|
|
|
|
|
|
|
func TestValidatorUpdateInitPCRs(t *testing.T) {
|
|
|
|
zero := measurements.WithAllBytes(0x00, measurements.WarnOnly, measurements.PCRMeasurementLength)
|
|
|
|
one := measurements.WithAllBytes(0x11, measurements.WarnOnly, measurements.PCRMeasurementLength)
|
|
|
|
one64 := base64.StdEncoding.EncodeToString(one.Expected[:])
|
|
|
|
oneHash := sha256.Sum256(one.Expected[:])
|
|
|
|
pcrZeroUpdatedOne := sha256.Sum256(append(zero.Expected[:], oneHash[:]...))
|
|
|
|
newTestPCRs := func() measurements.M {
|
|
|
|
return measurements.M{
|
|
|
|
0: measurements.WithAllBytes(0x00, measurements.WarnOnly, measurements.PCRMeasurementLength),
|
|
|
|
1: measurements.WithAllBytes(0x00, measurements.WarnOnly, measurements.PCRMeasurementLength),
|
|
|
|
2: measurements.WithAllBytes(0x00, measurements.WarnOnly, measurements.PCRMeasurementLength),
|
|
|
|
3: measurements.WithAllBytes(0x00, measurements.WarnOnly, measurements.PCRMeasurementLength),
|
|
|
|
4: measurements.WithAllBytes(0x00, measurements.WarnOnly, measurements.PCRMeasurementLength),
|
|
|
|
5: measurements.WithAllBytes(0x00, measurements.WarnOnly, measurements.PCRMeasurementLength),
|
|
|
|
6: measurements.WithAllBytes(0x00, measurements.WarnOnly, measurements.PCRMeasurementLength),
|
|
|
|
7: measurements.WithAllBytes(0x00, measurements.WarnOnly, measurements.PCRMeasurementLength),
|
|
|
|
8: measurements.WithAllBytes(0x00, measurements.WarnOnly, measurements.PCRMeasurementLength),
|
|
|
|
9: measurements.WithAllBytes(0x00, measurements.WarnOnly, measurements.PCRMeasurementLength),
|
|
|
|
10: measurements.WithAllBytes(0x00, measurements.WarnOnly, measurements.PCRMeasurementLength),
|
|
|
|
11: measurements.WithAllBytes(0x00, measurements.WarnOnly, measurements.PCRMeasurementLength),
|
|
|
|
12: measurements.WithAllBytes(0x00, measurements.WarnOnly, measurements.PCRMeasurementLength),
|
|
|
|
13: measurements.WithAllBytes(0x00, measurements.WarnOnly, measurements.PCRMeasurementLength),
|
|
|
|
14: measurements.WithAllBytes(0x00, measurements.WarnOnly, measurements.PCRMeasurementLength),
|
|
|
|
15: measurements.WithAllBytes(0x00, measurements.WarnOnly, measurements.PCRMeasurementLength),
|
|
|
|
16: measurements.WithAllBytes(0x00, measurements.WarnOnly, measurements.PCRMeasurementLength),
|
|
|
|
17: measurements.WithAllBytes(0x11, measurements.WarnOnly, measurements.PCRMeasurementLength),
|
|
|
|
18: measurements.WithAllBytes(0x11, measurements.WarnOnly, measurements.PCRMeasurementLength),
|
|
|
|
19: measurements.WithAllBytes(0x11, measurements.WarnOnly, measurements.PCRMeasurementLength),
|
|
|
|
20: measurements.WithAllBytes(0x11, measurements.WarnOnly, measurements.PCRMeasurementLength),
|
|
|
|
21: measurements.WithAllBytes(0x11, measurements.WarnOnly, measurements.PCRMeasurementLength),
|
|
|
|
22: measurements.WithAllBytes(0x11, measurements.WarnOnly, measurements.PCRMeasurementLength),
|
|
|
|
23: measurements.WithAllBytes(0x00, measurements.WarnOnly, measurements.PCRMeasurementLength),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
testCases := map[string]struct {
|
|
|
|
config config.AttestationCfg
|
|
|
|
ownerID string
|
|
|
|
clusterID string
|
|
|
|
wantErr bool
|
|
|
|
}{
|
|
|
|
"gcp update owner ID": {
|
|
|
|
config: &config.GCPSEVES{
|
|
|
|
Measurements: newTestPCRs(),
|
|
|
|
},
|
|
|
|
ownerID: one64,
|
|
|
|
},
|
|
|
|
"gcp update cluster ID": {
|
|
|
|
config: &config.GCPSEVES{
|
|
|
|
Measurements: newTestPCRs(),
|
|
|
|
},
|
|
|
|
clusterID: one64,
|
|
|
|
},
|
|
|
|
"gcp update both": {
|
|
|
|
config: &config.GCPSEVES{
|
|
|
|
Measurements: newTestPCRs(),
|
|
|
|
},
|
|
|
|
ownerID: one64,
|
|
|
|
clusterID: one64,
|
|
|
|
},
|
|
|
|
"azure update owner ID": {
|
|
|
|
config: &config.AzureSEVSNP{
|
|
|
|
Measurements: newTestPCRs(),
|
|
|
|
},
|
|
|
|
ownerID: one64,
|
|
|
|
},
|
|
|
|
"azure update cluster ID": {
|
|
|
|
config: &config.AzureSEVSNP{
|
|
|
|
Measurements: newTestPCRs(),
|
|
|
|
},
|
|
|
|
clusterID: one64,
|
|
|
|
},
|
|
|
|
"azure update both": {
|
|
|
|
config: &config.AzureSEVSNP{
|
|
|
|
Measurements: newTestPCRs(),
|
|
|
|
},
|
|
|
|
ownerID: one64,
|
|
|
|
clusterID: one64,
|
|
|
|
},
|
|
|
|
"owner ID and cluster ID empty": {
|
|
|
|
config: &config.AzureSEVSNP{
|
|
|
|
Measurements: newTestPCRs(),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
"invalid encoding": {
|
|
|
|
config: &config.GCPSEVES{
|
|
|
|
Measurements: newTestPCRs(),
|
|
|
|
},
|
|
|
|
ownerID: "invalid",
|
|
|
|
wantErr: true,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
for name, tc := range testCases {
|
|
|
|
t.Run(name, func(t *testing.T) {
|
|
|
|
assert := assert.New(t)
|
|
|
|
|
|
|
|
err := updateInitMeasurements(tc.config, tc.ownerID, tc.clusterID)
|
|
|
|
|
|
|
|
if tc.wantErr {
|
|
|
|
assert.Error(err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
require.NoError(t, err)
|
|
|
|
m := tc.config.GetMeasurements()
|
|
|
|
for i := 0; i < len(m); i++ {
|
|
|
|
switch {
|
|
|
|
case i == int(measurements.PCRIndexClusterID) && tc.clusterID == "":
|
|
|
|
// should be deleted
|
|
|
|
_, ok := m[uint32(i)]
|
|
|
|
assert.False(ok)
|
|
|
|
|
|
|
|
case i == int(measurements.PCRIndexClusterID):
|
|
|
|
pcr, ok := m[uint32(i)]
|
|
|
|
assert.True(ok)
|
|
|
|
assert.Equal(pcrZeroUpdatedOne[:], pcr.Expected)
|
|
|
|
|
|
|
|
case i == int(measurements.PCRIndexOwnerID) && tc.ownerID == "":
|
|
|
|
// should be deleted
|
|
|
|
_, ok := m[uint32(i)]
|
|
|
|
assert.False(ok)
|
|
|
|
|
|
|
|
case i == int(measurements.PCRIndexOwnerID):
|
|
|
|
pcr, ok := m[uint32(i)]
|
|
|
|
assert.True(ok)
|
|
|
|
assert.Equal(pcrZeroUpdatedOne[:], pcr.Expected)
|
|
|
|
|
|
|
|
default:
|
|
|
|
if i >= 17 && i <= 22 {
|
|
|
|
assert.Equal(one, m[uint32(i)])
|
|
|
|
} else {
|
|
|
|
assert.Equal(zero, m[uint32(i)])
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestValidatorUpdateInitMeasurementsTDX(t *testing.T) {
|
|
|
|
zero := measurements.WithAllBytes(0x00, true, measurements.TDXMeasurementLength)
|
|
|
|
one := measurements.WithAllBytes(0x11, true, measurements.TDXMeasurementLength)
|
|
|
|
one64 := base64.StdEncoding.EncodeToString(one.Expected[:])
|
|
|
|
oneHash := sha512.Sum384(one.Expected[:])
|
|
|
|
tdxZeroUpdatedOne := sha512.Sum384(append(zero.Expected[:], oneHash[:]...))
|
|
|
|
newTestTDXMeasurements := func() measurements.M {
|
|
|
|
return measurements.M{
|
|
|
|
0: measurements.WithAllBytes(0x00, true, measurements.TDXMeasurementLength),
|
|
|
|
1: measurements.WithAllBytes(0x00, true, measurements.TDXMeasurementLength),
|
|
|
|
2: measurements.WithAllBytes(0x00, true, measurements.TDXMeasurementLength),
|
|
|
|
3: measurements.WithAllBytes(0x00, true, measurements.TDXMeasurementLength),
|
|
|
|
4: measurements.WithAllBytes(0x00, true, measurements.TDXMeasurementLength),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
testCases := map[string]struct {
|
|
|
|
measurements measurements.M
|
|
|
|
clusterID string
|
|
|
|
wantErr bool
|
|
|
|
}{
|
|
|
|
"QEMUT TDX update update cluster ID": {
|
|
|
|
measurements: newTestTDXMeasurements(),
|
|
|
|
clusterID: one64,
|
|
|
|
},
|
|
|
|
"cluster ID empty": {
|
|
|
|
measurements: newTestTDXMeasurements(),
|
|
|
|
},
|
|
|
|
"invalid encoding": {
|
|
|
|
measurements: newTestTDXMeasurements(),
|
|
|
|
clusterID: "invalid",
|
|
|
|
wantErr: true,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
for name, tc := range testCases {
|
|
|
|
t.Run(name, func(t *testing.T) {
|
|
|
|
assert := assert.New(t)
|
|
|
|
|
|
|
|
cfg := &config.QEMUTDX{Measurements: tc.measurements}
|
|
|
|
|
|
|
|
err := updateInitMeasurements(cfg, "", tc.clusterID)
|
|
|
|
|
|
|
|
if tc.wantErr {
|
|
|
|
assert.Error(err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
assert.NoError(err)
|
|
|
|
for i := 0; i < len(tc.measurements); i++ {
|
|
|
|
switch {
|
|
|
|
case i == measurements.TDXIndexClusterID && tc.clusterID == "":
|
|
|
|
// should be deleted
|
|
|
|
_, ok := cfg.Measurements[uint32(i)]
|
|
|
|
assert.False(ok)
|
|
|
|
|
|
|
|
case i == measurements.TDXIndexClusterID:
|
|
|
|
pcr, ok := cfg.Measurements[uint32(i)]
|
|
|
|
assert.True(ok)
|
|
|
|
assert.Equal(tdxZeroUpdatedOne[:], pcr.Expected)
|
|
|
|
|
|
|
|
default:
|
|
|
|
assert.Equal(zero, cfg.Measurements[uint32(i)])
|
|
|
|
}
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|