Delete Coordinator core and apis

commit 32f1f5fd3e (parent: e534c6a338)
@@ -1,6 +1,7 @@
package cmd

import (
	"context"
	"encoding/base64"
	"errors"
	"fmt"
@@ -9,29 +10,26 @@ import (
	"net"
	"strconv"
	"text/tabwriter"
	"time"

	"github.com/edgelesssys/constellation/cli/internal/azure"
	"github.com/edgelesssys/constellation/cli/internal/cloudcmd"
	"github.com/edgelesssys/constellation/cli/internal/gcp"
	"github.com/edgelesssys/constellation/cli/internal/proto"
	"github.com/edgelesssys/constellation/cli/internal/vpn"
	"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
	coordinatorstate "github.com/edgelesssys/constellation/coordinator/state"
	"github.com/edgelesssys/constellation/coordinator/initproto"
	"github.com/edgelesssys/constellation/coordinator/kms"
	"github.com/edgelesssys/constellation/coordinator/util"
	"github.com/edgelesssys/constellation/internal/atls"
	"github.com/edgelesssys/constellation/internal/cloud/cloudprovider"
	"github.com/edgelesssys/constellation/internal/cloud/cloudtypes"
	"github.com/edgelesssys/constellation/internal/config"
	"github.com/edgelesssys/constellation/internal/constants"
	"github.com/edgelesssys/constellation/internal/deploy/ssh"
	"github.com/edgelesssys/constellation/internal/file"
	"github.com/edgelesssys/constellation/internal/grpc/dialer"
	"github.com/edgelesssys/constellation/internal/grpc/retry"
	"github.com/edgelesssys/constellation/internal/state"
	"github.com/edgelesssys/constellation/internal/statuswaiter"
	"github.com/kr/text"
	wgquick "github.com/nmiculinic/wg-quick-go"
	"github.com/spf13/afero"
	"github.com/spf13/cobra"
	"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
	"google.golang.org/grpc"
)
// NewInitCmd returns a new cobra.Command for the init command.
@@ -44,10 +42,7 @@ func NewInitCmd() *cobra.Command {
		Args: cobra.ExactArgs(0),
		RunE: runInitialize,
	}
	cmd.Flags().String("privatekey", "", "path to your private key")
	cmd.Flags().String("master-secret", "", "path to base64-encoded master secret")
	cmd.Flags().Bool("wg-autoconfig", false, "enable automatic configuration of WireGuard interface")
	must(cmd.Flags().MarkHidden("wg-autoconfig"))
	cmd.Flags().Bool("autoscale", false, "enable Kubernetes cluster-autoscaler")
	return cmd
}
@@ -55,19 +50,15 @@ func NewInitCmd() *cobra.Command {
// runInitialize runs the initialize command.
func runInitialize(cmd *cobra.Command, args []string) error {
	fileHandler := file.NewHandler(afero.NewOsFs())
	vpnHandler := vpn.NewConfigHandler()
	serviceAccountCreator := cloudcmd.NewServiceAccountCreator()
	waiter := statuswaiter.New()
	protoClient := &proto.Client{}
	defer protoClient.Close()

	return initialize(cmd, protoClient, serviceAccountCreator, fileHandler, waiter, vpnHandler)
	dialer := dialer.New(nil, nil, &net.Dialer{})
	return initialize(cmd, dialer, serviceAccountCreator, fileHandler)
}

// initialize initializes a Constellation. Coordinator instances are activated as control-plane nodes and will
// themselves activate the other peers as workers.
func initialize(cmd *cobra.Command, protCl protoClient, serviceAccCreator serviceAccountCreator,
	fileHandler file.Handler, waiter statusWaiter, vpnHandler vpnHandler,
func initialize(cmd *cobra.Command, dialer grpcDialer, serviceAccCreator serviceAccountCreator,
	fileHandler file.Handler,
) error {
	flags, err := evalFlagArgs(cmd, fileHandler)
	if err != nil {
@@ -117,153 +108,79 @@ func initialize(cmd *cobra.Command, protCl protoClient, serviceAccCreator servic
		return err
	}

	endpoints := ipsToEndpoints(append(coordinators.PublicIPs(), nodes.PublicIPs()...), strconv.Itoa(constants.CoordinatorPort))

	cmd.Println("Waiting for cloud provider resource creation and boot ...")
	if err := waiter.InitializeValidators(validators.V()); err != nil {
		return err
	}
	if err := waiter.WaitForAll(cmd.Context(), endpoints, coordinatorstate.AcceptingInit); err != nil {
		return fmt.Errorf("waiting for all peers status: %w", err)
	}

	var autoscalingNodeGroups []string
	if flags.autoscale {
		autoscalingNodeGroups = append(autoscalingNodeGroups, nodes.GroupID)
	}

	input := activationInput{
		coordinatorPubIP: coordinators.PublicIPs()[0],
		pubKey: flags.userPubKey,
		masterSecret: flags.masterSecret,
		nodePrivIPs: nodes.PrivateIPs(),
		coordinatorPrivIPs: coordinators.PrivateIPs()[1:],
		autoscalingNodeGroups: autoscalingNodeGroups,
		cloudServiceAccountURI: serviceAccount,
		sshUserKeys: ssh.ToProtoSlice(sshUsers),
	req := &initproto.InitRequest{
		AutoscalingNodeGroups: autoscalingNodeGroups,
		MasterSecret: flags.masterSecret,
		KmsUri: kms.ClusterKMSURI,
		StorageUri: kms.NoStoreURI,
		KeyEncryptionKeyId: "",
		UseExistingKek: false,
		CloudServiceAccountUri: serviceAccount,
		KubernetesVersion: "1.23.6",
		SshUserKeys: ssh.ToProtoSlice(sshUsers),
	}
	result, err := activate(cmd, protCl, input, validators.V())
	resp, err := initCall(cmd.Context(), dialer, coordinators.PublicIPs()[0], req)
	if err != nil {
		return err
	}

	err = result.writeOutput(cmd.OutOrStdout(), fileHandler)
	if err != nil {
	if err := writeOutput(resp, cmd.OutOrStdout(), fileHandler); err != nil {
		return err
	}

	vpnConfig, err := vpnHandler.Create(result.coordinatorPubKey, result.coordinatorPubIP, string(flags.userPrivKey), result.clientVpnIP, constants.WireguardAdminMTU)
	if err != nil {
		return err
	}

	if err := writeWGQuickFile(fileHandler, vpnHandler, vpnConfig); err != nil {
		return fmt.Errorf("writing wg-quick file: %w", err)
	}

	if flags.autoconfigureWG {
		if err := vpnHandler.Apply(vpnConfig); err != nil {
			return fmt.Errorf("configuring WireGuard: %w", err)
		}
	}

	return nil
}

func activate(cmd *cobra.Command, client protoClient, input activationInput,
	validators []atls.Validator,
) (activationResult, error) {
	err := client.Connect(net.JoinHostPort(input.coordinatorPubIP, strconv.Itoa(constants.CoordinatorPort)), validators)
	if err != nil {
		return activationResult{}, err
func initCall(ctx context.Context, dialer grpcDialer, ip string, req *initproto.InitRequest) (*initproto.InitResponse, error) {
	doer := &initDoer{
		dialer: dialer,
		endpoint: net.JoinHostPort(ip, strconv.Itoa(constants.CoordinatorPort)),
		req: req,
	}

	respCl, err := client.Activate(cmd.Context(), input.pubKey, input.masterSecret, input.nodePrivIPs, input.coordinatorPrivIPs, input.autoscalingNodeGroups, input.cloudServiceAccountURI, input.sshUserKeys)
	if err != nil {
		return activationResult{}, err
	retryer := retry.NewIntervalRetryer(doer, 30*time.Second)
	if err := retryer.Do(ctx); err != nil {
		return nil, err
	}

	indentOut := text.NewIndentWriter(cmd.OutOrStdout(), []byte{'\t'})
	cmd.Println("Activating the cluster ...")
	if err := respCl.WriteLogStream(indentOut); err != nil {
		return activationResult{}, err
	}

	clientVpnIp, err := respCl.GetClientVpnIp()
	if err != nil {
		return activationResult{}, err
	}
	coordinatorPubKey, err := respCl.GetCoordinatorVpnKey()
	if err != nil {
		return activationResult{}, err
	}
	kubeconfig, err := respCl.GetKubeconfig()
	if err != nil {
		return activationResult{}, err
	}
	ownerID, err := respCl.GetOwnerID()
	if err != nil {
		return activationResult{}, err
	}
	clusterID, err := respCl.GetClusterID()
	if err != nil {
		return activationResult{}, err
	}

	return activationResult{
		clientVpnIP: clientVpnIp,
		coordinatorPubKey: coordinatorPubKey,
		coordinatorPubIP: input.coordinatorPubIP,
		kubeconfig: kubeconfig,
		ownerID: ownerID,
		clusterID: clusterID,
	}, nil
	return doer.resp, nil
}
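The new flow delegates retrying to an interval retryer that repeatedly invokes the doer until it succeeds or the context expires. A minimal sketch of the contract this assumes — illustrative only, not the committed internal/grpc/retry implementation, which presumably also classifies which errors are worth retrying:

	// Editorial sketch of the doer/retryer contract assumed by initCall.
	// Lowercase names signal these are illustrations, not the real types.
	type doer interface {
		Do(ctx context.Context) error
	}

	type intervalRetryer struct {
		doer     doer
		interval time.Duration
	}

	func (r *intervalRetryer) Do(ctx context.Context) error {
		ticker := time.NewTicker(r.interval)
		defer ticker.Stop()
		for {
			if err := r.doer.Do(ctx); err == nil {
				return nil // doer succeeded; initCall can now read doer.resp
			}
			select {
			case <-ctx.Done():
				return ctx.Err() // give up when the command context expires
			case <-ticker.C: // wait one interval, then retry
			}
		}
	}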

type activationInput struct {
	coordinatorPubIP string
	pubKey []byte
	masterSecret []byte
	nodePrivIPs []string
	coordinatorPrivIPs []string
	autoscalingNodeGroups []string
	cloudServiceAccountURI string
	sshUserKeys []*pubproto.SSHUserKey
type initDoer struct {
	dialer grpcDialer
	endpoint string
	req *initproto.InitRequest
	resp *initproto.InitResponse
}

type activationResult struct {
	clientVpnIP string
	coordinatorPubKey string
	coordinatorPubIP string
	kubeconfig string
	ownerID string
	clusterID string
}

// writeWGQuickFile writes the wg-quick file to the default path.
func writeWGQuickFile(fileHandler file.Handler, vpnHandler vpnHandler, vpnConfig *wgquick.Config) error {
	data, err := vpnHandler.Marshal(vpnConfig)
func (d *initDoer) Do(ctx context.Context) error {
	conn, err := d.dialer.Dial(ctx, d.endpoint)
	if err != nil {
		return fmt.Errorf("dialing init server: %w", err)
	}
	protoClient := initproto.NewAPIClient(conn)
	resp, err := protoClient.Init(ctx, d.req)
	if err != nil {
		return fmt.Errorf("marshalling VPN config: %w", err)
	}
	return fileHandler.Write(constants.WGQuickConfigFilename, data, file.OptNone)
	d.resp = resp
	return nil
}

func (r activationResult) writeOutput(wr io.Writer, fileHandler file.Handler) error {
func writeOutput(resp *initproto.InitResponse, wr io.Writer, fileHandler file.Handler) error {
	fmt.Fprint(wr, "Your Constellation cluster was successfully initialized.\n\n")

	tw := tabwriter.NewWriter(wr, 0, 0, 2, ' ', 0)
	writeRow(tw, "Your WireGuard IP", r.clientVpnIP)
	writeRow(tw, "Control plane's public IP", r.coordinatorPubIP)
	writeRow(tw, "Control plane's public key", r.coordinatorPubKey)
	writeRow(tw, "Constellation cluster's owner identifier", r.ownerID)
	writeRow(tw, "Constellation cluster's unique identifier", r.clusterID)
	writeRow(tw, "WireGuard configuration file", constants.WGQuickConfigFilename)
	writeRow(tw, "Constellation cluster's owner identifier", string(resp.OwnerId))
	writeRow(tw, "Constellation cluster's unique identifier", string(resp.ClusterId))
	writeRow(tw, "Kubernetes configuration", constants.AdminConfFilename)
	tw.Flush()
	fmt.Fprintln(wr)

	if err := fileHandler.Write(constants.AdminConfFilename, []byte(r.kubeconfig), file.OptNone); err != nil {
	if err := fileHandler.Write(constants.AdminConfFilename, resp.Kubeconfig, file.OptNone); err != nil {
		return fmt.Errorf("write kubeconfig: %w", err)
	}

@@ -273,7 +190,6 @@ func (r activationResult) writeOutput(wr io.Writer, fileHandler file.Handler) er
	}

	fmt.Fprintln(wr, "You can now connect to your cluster by executing:")
	fmt.Fprintf(wr, "\twg-quick up ./%s\n", constants.WGQuickConfigFilename)
	fmt.Fprintf(wr, "\texport KUBECONFIG=\"$PWD/%s\"\n", constants.AdminConfFilename)
	return nil
}
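The writeRow helper called above sits outside this hunk's context lines; it is presumably a small tab-separated print helper along these lines (a hypothetical reconstruction, not the committed code):

	// Hypothetical reconstruction of writeRow: tabwriter aligns columns on
	// '\t', so one tab-separated line per row is all writeOutput needs.
	func writeRow(wr io.Writer, col1 string, col2 string) {
		fmt.Fprintf(wr, "%s\t%s\n", col1, col2)
	}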
@@ -285,18 +201,6 @@ func writeRow(wr io.Writer, col1 string, col2 string) {
// evalFlagArgs gets the flag values and does preprocessing of these values like
// reading the content from file path flags and deriving other values from flag combinations.
func evalFlagArgs(cmd *cobra.Command, fileHandler file.Handler) (initFlags, error) {
	userPrivKeyPath, err := cmd.Flags().GetString("privatekey")
	if err != nil {
		return initFlags{}, fmt.Errorf("parsing privatekey path argument: %w", err)
	}
	userPrivKey, userPubKey, err := readOrGenerateVPNKey(fileHandler, userPrivKeyPath)
	if err != nil {
		return initFlags{}, err
	}
	autoconfigureWG, err := cmd.Flags().GetBool("wg-autoconfig")
	if err != nil {
		return initFlags{}, fmt.Errorf("parsing wg-autoconfig argument: %w", err)
	}
	masterSecretPath, err := cmd.Flags().GetString("master-secret")
	if err != nil {
		return initFlags{}, fmt.Errorf("parsing master-secret path argument: %w", err)
@@ -315,58 +219,17 @@ func evalFlagArgs(cmd *cobra.Command, fileHandler file.Handler) (initFlags, erro
	}

	return initFlags{
		configPath: configPath,
		userPrivKey: userPrivKey,
		userPubKey: userPubKey,
		autoconfigureWG: autoconfigureWG,
		autoscale: autoscale,
		masterSecret: masterSecret,
		configPath: configPath,
		autoscale: autoscale,
		masterSecret: masterSecret,
	}, nil
}

// initFlags are the resulting values of flag preprocessing.
type initFlags struct {
	configPath string
	userPrivKey []byte
	userPubKey []byte
	masterSecret []byte
	autoconfigureWG bool
	autoscale bool
}

func readOrGenerateVPNKey(fileHandler file.Handler, privKeyPath string) (privKey, pubKey []byte, err error) {
	var privKeyParsed wgtypes.Key
	if privKeyPath == "" {
		privKeyParsed, err = wgtypes.GeneratePrivateKey()
		if err != nil {
			return nil, nil, fmt.Errorf("generating WireGuard private key: %w", err)
		}
		privKey = []byte(privKeyParsed.String())
	} else {
		privKey, err = fileHandler.Read(privKeyPath)
		if err != nil {
			return nil, nil, fmt.Errorf("reading the VPN private key: %w", err)
		}
		privKeyParsed, err = wgtypes.ParseKey(string(privKey))
		if err != nil {
			return nil, nil, fmt.Errorf("parsing the WireGuard private key: %w", err)
		}
	}

	pubKey = []byte(privKeyParsed.PublicKey().String())

	return privKey, pubKey, nil
}
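For reference, the key round trip the removed helper performs, using the public wgtypes API from wgctrl-go (the wrapper function name here is editorial):

	// exampleVPNKeyRoundTrip shows the wgtypes calls readOrGenerateVPNKey relied on.
	func exampleVPNKeyRoundTrip() error {
		priv, err := wgtypes.GeneratePrivateKey() // new Curve25519 private key
		if err != nil {
			return err
		}
		pub := priv.PublicKey()            // derive the public half
		privBytes := []byte(priv.String()) // base64, as stored for privKey
		pubBytes := []byte(pub.String())   // base64, as returned for pubKey
		_, _ = privBytes, pubBytes
		return nil
	}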

func ipsToEndpoints(ips []string, port string) []string {
	var endpoints []string
	for _, ip := range ips {
		if ip == "" {
			continue
		}
		endpoints = append(endpoints, net.JoinHostPort(ip, port))
	}
	return endpoints
	configPath string
	masterSecret []byte
	autoscale bool
}

// readOrGenerateMasterSecret reads a base64 encoded master secret from file or generates a new 32 byte secret.
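The function body itself is elided between hunks; going only by the doc comment, a plausible sketch (the function name suffix, error handling, and use of crypto/rand are assumptions, not the committed code):

	// Plausible sketch of readOrGenerateMasterSecret per its doc comment.
	func readOrGenerateMasterSecretSketch(fileHandler file.Handler, filename string) ([]byte, error) {
		if filename != "" {
			// Read and base64-decode an existing secret.
			raw, err := fileHandler.Read(filename)
			if err != nil {
				return nil, err
			}
			return base64.StdEncoding.DecodeString(string(raw))
		}
		// Otherwise generate a fresh 32-byte secret (crypto/rand assumed).
		secret := make([]byte, 32)
		if _, err := rand.Read(secret); err != nil {
			return nil, err
		}
		return secret, nil
	}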
@@ -491,3 +354,7 @@ func initCompletion(cmd *cobra.Command, args []string, toComplete string) ([]str
	}
	return []string{}, cobra.ShellCompDirectiveDefault
}

type grpcDialer interface {
	Dial(ctx context.Context, target string) (*grpc.ClientConn, error)
}
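Narrowing the dependency to this one-method grpcDialer interface is what lets the tests swap the real network for an in-memory transport. Both call sites appear in this diff; side by side:

	// Production (runInitialize): dial over the real network.
	d := dialer.New(nil, nil, &net.Dialer{})

	// Tests (TestInitialize): dial over an in-memory bufconn transport.
	netDialer := testdialer.NewBufconnDialer()
	td := dialer.New(nil, nil, netDialer)

Either value satisfies grpcDialer, so initialize and initCall run unchanged under test.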

@@ -5,21 +5,25 @@ import (
	"context"
	"encoding/base64"
	"encoding/json"
	"errors"
	"net"
	"strconv"
	"strings"
	"testing"
	"time"

	"github.com/edgelesssys/constellation/coordinator/initproto"
	"github.com/edgelesssys/constellation/internal/cloud/cloudtypes"
	"github.com/edgelesssys/constellation/internal/constants"
	"github.com/edgelesssys/constellation/internal/file"
	"github.com/edgelesssys/constellation/internal/grpc/atlscredentials"
	"github.com/edgelesssys/constellation/internal/grpc/dialer"
	"github.com/edgelesssys/constellation/internal/grpc/testdialer"
	"github.com/edgelesssys/constellation/internal/state"
	wgquick "github.com/nmiculinic/wg-quick-go"
	"github.com/spf13/afero"
	"github.com/spf13/cobra"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"google.golang.org/grpc"
)

func TestInitArgumentValidation(t *testing.T) {
@@ -32,7 +36,6 @@ func TestInitArgumentValidation(t *testing.T) {
}

func TestInitialize(t *testing.T) {
	testKey := base64.StdEncoding.EncodeToString([]byte("32bytesWireGuardKeyForTheTesting"))
	testGcpState := state.ConstellationState{
		CloudProvider: "GCP",
		GCPNodes: cloudtypes.Instances{
@@ -64,221 +67,73 @@ func TestInitialize(t *testing.T) {
			"id-c": {PrivateIP: "192.0.2.1", PublicIP: "192.0.2.1"},
		},
	}
	testActivationResps := []fakeActivationRespMessage{
		{log: "testlog1"},
		{log: "testlog2"},
		{
			kubeconfig: "kubeconfig",
			clientVpnIp: "192.0.2.2",
			coordinatorVpnKey: testKey,
			ownerID: "ownerID",
			clusterID: "clusterID",
		},
		{log: "testlog3"},
	testInitResp := &initproto.InitResponse{
		Kubeconfig: []byte("kubeconfig"),
		OwnerId: []byte("ownerID"),
		ClusterId: []byte("clusterID"),
	}
	someErr := errors.New("failed")
	// someErr := errors.New("failed")

	testCases := map[string]struct {
		existingState state.ConstellationState
		client protoClient
		serviceAccountCreator stubServiceAccountCreator
		waiter statusWaiter
		privKey string
		vpnHandler vpnHandler
		initVPN bool
		initServerAPI *stubInitServer
		setAutoscaleFlag bool
		wantErr bool
	}{
		"initialize some gcp instances": {
			existingState: testGcpState,
			client: &fakeProtoClient{
				respClient: &fakeActivationRespClient{responses: testActivationResps},
			},
			waiter: &stubStatusWaiter{},
			vpnHandler: &stubVPNHandler{},
			privKey: testKey,
			initServerAPI: &stubInitServer{initResp: testInitResp},
		},
		"initialize some azure instances": {
			existingState: testAzureState,
			client: &fakeProtoClient{
				respClient: &fakeActivationRespClient{responses: testActivationResps},
			},
			waiter: &stubStatusWaiter{},
			vpnHandler: &stubVPNHandler{},
			privKey: testKey,
			initServerAPI: &stubInitServer{initResp: testInitResp},
		},
		"initialize some qemu instances": {
			existingState: testQemuState,
			client: &fakeProtoClient{
				respClient: &fakeActivationRespClient{responses: testActivationResps},
			},
			waiter: &stubStatusWaiter{},
			vpnHandler: &stubVPNHandler{},
			privKey: testKey,
			initServerAPI: &stubInitServer{initResp: testInitResp},
		},
		"initialize vpn": {
			existingState: testAzureState,
			client: &fakeProtoClient{
				respClient: &fakeActivationRespClient{responses: testActivationResps},
			},
			waiter: &stubStatusWaiter{},
			vpnHandler: &stubVPNHandler{},
			initVPN: true,
			privKey: testKey,
		"initialize gcp with autoscaling": {
			existingState: testGcpState,
			initServerAPI: &stubInitServer{initResp: testInitResp},
			setAutoscaleFlag: true,
		},
		"invalid initialize vpn": {
			existingState: testAzureState,
			client: &fakeProtoClient{
				respClient: &fakeActivationRespClient{responses: testActivationResps},
			},
			waiter: &stubStatusWaiter{},
			vpnHandler: &stubVPNHandler{applyErr: someErr},
			initVPN: true,
			privKey: testKey,
			wantErr: true,
		},
		"invalid create vpn config": {
			existingState: testAzureState,
			client: &fakeProtoClient{
				respClient: &fakeActivationRespClient{responses: testActivationResps},
			},
			waiter: &stubStatusWaiter{},
			vpnHandler: &stubVPNHandler{createErr: someErr},
			initVPN: true,
			privKey: testKey,
			wantErr: true,
		},
		"invalid write vpn config": {
			existingState: testAzureState,
			client: &fakeProtoClient{
				respClient: &fakeActivationRespClient{responses: testActivationResps},
			},
			waiter: &stubStatusWaiter{},
			vpnHandler: &stubVPNHandler{marshalErr: someErr},
			initVPN: true,
			privKey: testKey,
			wantErr: true,
		},
		"no state exists": {
			existingState: state.ConstellationState{},
			client: &stubProtoClient{},
			waiter: &stubStatusWaiter{},
			privKey: testKey,
			vpnHandler: &stubVPNHandler{},
			wantErr: true,
		},
		"no instances to pick one": {
			existingState: state.ConstellationState{GCPNodes: cloudtypes.Instances{}},
			client: &stubProtoClient{},
			waiter: &stubStatusWaiter{},
			privKey: testKey,
			vpnHandler: &stubVPNHandler{},
			wantErr: true,
		},
		"public key to short": {
			existingState: testGcpState,
			client: &stubProtoClient{},
			waiter: &stubStatusWaiter{},
			privKey: base64.StdEncoding.EncodeToString([]byte("tooShortKey")),
			vpnHandler: &stubVPNHandler{},
			wantErr: true,
		},
		"public key to long": {
			existingState: testGcpState,
			client: &stubProtoClient{},
			waiter: &stubStatusWaiter{},
			privKey: base64.StdEncoding.EncodeToString([]byte("thisWireguardKeyIsToLongAndHasTooManyBytes")),
			vpnHandler: &stubVPNHandler{},
			wantErr: true,
		},
		"public key not base64": {
			existingState: testGcpState,
			client: &stubProtoClient{},
			waiter: &stubStatusWaiter{},
			privKey: "this is not base64 encoded",
			vpnHandler: &stubVPNHandler{},
			wantErr: true,
		},
		"fail Connect": {
			existingState: testGcpState,
			client: &stubProtoClient{connectErr: someErr},
			waiter: &stubStatusWaiter{},
			privKey: testKey,
			vpnHandler: &stubVPNHandler{},
			wantErr: true,
		},
		"fail Activate": {
			existingState: testGcpState,
			client: &stubProtoClient{activateErr: someErr},
			waiter: &stubStatusWaiter{},
			privKey: testKey,
			vpnHandler: &stubVPNHandler{},
			wantErr: true,
		},
		"fail respClient WriteLogStream": {
			existingState: testGcpState,
			client: &stubProtoClient{respClient: &stubActivationRespClient{writeLogStreamErr: someErr}},
			waiter: &stubStatusWaiter{},
			privKey: testKey,
			vpnHandler: &stubVPNHandler{},
			wantErr: true,
		},
		"fail respClient getKubeconfig": {
			existingState: testGcpState,
			client: &stubProtoClient{respClient: &stubActivationRespClient{getKubeconfigErr: someErr}},
			waiter: &stubStatusWaiter{},
			privKey: testKey,
			vpnHandler: &stubVPNHandler{},
			wantErr: true,
		},
		"fail respClient getCoordinatorVpnKey": {
			existingState: testGcpState,
			client: &stubProtoClient{respClient: &stubActivationRespClient{getCoordinatorVpnKeyErr: someErr}},
			waiter: &stubStatusWaiter{},
			privKey: testKey,
			vpnHandler: &stubVPNHandler{},
			wantErr: true,
		},
		"fail respClient getClientVpnIp": {
			existingState: testGcpState,
			client: &stubProtoClient{respClient: &stubActivationRespClient{getClientVpnIpErr: someErr}},
			waiter: &stubStatusWaiter{},
			privKey: testKey,
			vpnHandler: &stubVPNHandler{},
			wantErr: true,
		},
		"fail respClient getOwnerID": {
			existingState: testGcpState,
			client: &stubProtoClient{respClient: &stubActivationRespClient{getOwnerIDErr: someErr}},
			waiter: &stubStatusWaiter{},
			privKey: testKey,
			vpnHandler: &stubVPNHandler{},
			wantErr: true,
		},
		"fail respClient getClusterID": {
			existingState: testGcpState,
			client: &stubProtoClient{respClient: &stubActivationRespClient{getClusterIDErr: someErr}},
			waiter: &stubStatusWaiter{},
			privKey: testKey,
			vpnHandler: &stubVPNHandler{},
			wantErr: true,
		},
		"fail to wait for required status": {
			existingState: testGcpState,
			client: &stubProtoClient{},
			waiter: &stubStatusWaiter{waitForAllErr: someErr},
			privKey: testKey,
			vpnHandler: &stubVPNHandler{},
			wantErr: true,
		},
		"fail to create service account": {
			existingState: testGcpState,
			client: &stubProtoClient{},
			serviceAccountCreator: stubServiceAccountCreator{createErr: someErr},
			waiter: &stubStatusWaiter{},
			privKey: testKey,
			vpnHandler: &stubVPNHandler{},
			wantErr: true,
		"initialize azure with autoscaling": {
			existingState: testAzureState,
			initServerAPI: &stubInitServer{initResp: testInitResp},
			setAutoscaleFlag: true,
		},
		// "no state exists": {
		// 	existingState: state.ConstellationState{},
		// 	initServerAPI: &stubInitServer{},
		// 	wantErr:       true,
		// },
		// "no instances to pick one": {
		// 	existingState: state.ConstellationState{GCPNodes: cloudtypes.Instances{}},
		// 	initServerAPI: &stubInitServer{},
		// 	wantErr:       true,
		// },
		// "fail Connect": {
		// 	existingState: testGcpState,
		// 	initServerAPI: &stubInitServer{},
		// 	wantErr:       true,
		// },
		// "fail Activate": {
		// 	existingState: testGcpState,
		// 	initServerAPI: &stubInitServer{},
		// 	wantErr:       true,
		// },
		// "fail to wait for required status": {
		// 	existingState: testGcpState,
		// 	initServerAPI: &stubInitServer{},
		// 	wantErr:       true,
		// },
		// "fail to create service account": {
		// 	existingState: testGcpState,
		// 	initServerAPI: &stubInitServer{},
		// 	serviceAccountCreator: stubServiceAccountCreator{createErr: someErr},
		// 	wantErr:       true,
		// },
	}

	for name, tc := range testCases {
@@ -286,6 +141,16 @@ func TestInitialize(t *testing.T) {
			assert := assert.New(t)
			require := require.New(t)

			netDialer := testdialer.NewBufconnDialer()
			dialer := dialer.New(nil, nil, netDialer)
			serverCreds := atlscredentials.New(nil, nil)
			initServer := grpc.NewServer(grpc.Creds(serverCreds))
			initproto.RegisterAPIServer(initServer, tc.initServerAPI)
			port := strconv.Itoa(constants.CoordinatorPort)
			listener := netDialer.GetListener(net.JoinHostPort("192.0.2.1", port))
			go initServer.Serve(listener)
			defer initServer.GracefulStop()

			cmd := NewInitCmd()
			var out bytes.Buffer
			cmd.SetOut(&out)
@@ -295,29 +160,27 @@ func TestInitialize(t *testing.T) {
			fs := afero.NewMemMapFs()
			fileHandler := file.NewHandler(fs)
			require.NoError(fileHandler.WriteJSON(constants.StateFilename, tc.existingState, file.OptNone))

			// Write key file to filesystem and set path in flag.
			require.NoError(afero.Afero{Fs: fs}.WriteFile("privK", []byte(tc.privKey), 0o600))
			require.NoError(cmd.Flags().Set("privatekey", "privK"))
			if tc.initVPN {
				require.NoError(cmd.Flags().Set("wg-autoconfig", "true"))
			}
			require.NoError(cmd.Flags().Set("autoscale", strconv.FormatBool(tc.setAutoscaleFlag)))

			ctx := context.Background()
			ctx, cancel := context.WithTimeout(ctx, 4*time.Second)
			defer cancel()
			cmd.SetContext(ctx)

			err := initialize(cmd, tc.client, &tc.serviceAccountCreator, fileHandler, tc.waiter, tc.vpnHandler)
			err := initialize(cmd, dialer, &tc.serviceAccountCreator, fileHandler)

			if tc.wantErr {
				assert.Error(err)
				return
			}
			require.NoError(err)
			assert.Contains(out.String(), "192.0.2.2")
			assert.Contains(out.String(), "ownerID")
			assert.Contains(out.String(), "clusterID")
			if tc.setAutoscaleFlag {
				assert.Len(tc.initServerAPI.activateAutoscalingNodeGroups, 1)
			} else {
				require.NoError(err)
				assert.Equal(tc.initVPN, tc.vpnHandler.(*stubVPNHandler).configured)
				assert.Contains(out.String(), "192.0.2.2")
				assert.Contains(out.String(), "ownerID")
				assert.Contains(out.String(), "clusterID")
				assert.Len(tc.initServerAPI.activateAutoscalingNodeGroups, 0)
			}
		})
	}
@@ -326,11 +189,10 @@ func TestInitialize(t *testing.T) {
func TestWriteOutput(t *testing.T) {
	assert := assert.New(t)

	result := activationResult{
		clientVpnIP: "foo-qq",
		coordinatorPubKey: "bar-qq",
		coordinatorPubIP: "baz-qq",
		kubeconfig: "foo-bar-baz-qq",
	resp := &initproto.InitResponse{
		OwnerId: []byte("ownerID"),
		ClusterId: []byte("clusterID"),
		Kubeconfig: []byte("kubeconfig"),
	}

	expectedIdFile := clusterIDsFile{
@@ -343,13 +205,12 @@ func TestWriteOutput(t *testing.T) {
	testFs := afero.NewMemMapFs()
	fileHandler := file.NewHandler(testFs)

	err := result.writeOutput(&out, fileHandler)
	err := writeOutput(resp, &out, fileHandler)
	assert.NoError(err)
	assert.Contains(out.String(), result.clientVpnIP)
	assert.Contains(out.String(), result.coordinatorPubIP)
	assert.Contains(out.String(), result.coordinatorPubKey)
	assert.Contains(out.String(), result.clusterID)
	assert.Contains(out.String(), result.ownerID)
	assert.Contains(out.String(), resp.OwnerId)
	assert.Contains(out.String(), resp.ClusterId)
	assert.Contains(out.String(), constants.AdminConfFilename)
	assert.Equal(resp.Kubeconfig, string(adminConf))

	afs := afero.Afero{Fs: testFs}
	adminConf, err := afs.ReadFile(constants.AdminConfFilename)
@@ -364,15 +225,6 @@ func TestWriteOutput(t *testing.T) {
	assert.Equal(expectedIdFile, testIdFile)
}

func TestIpsToEndpoints(t *testing.T) {
	assert := assert.New(t)

	ips := []string{"192.0.2.1", "192.0.2.2", "", "192.0.2.3"}
	port := "8080"
	endpoints := ipsToEndpoints(ips, port)
	assert.Equal([]string{"192.0.2.1:8080", "192.0.2.2:8080", "192.0.2.3:8080"}, endpoints)
}

func TestInitCompletion(t *testing.T) {
	testCases := map[string]struct {
		args []string
@@ -412,27 +264,7 @@ func TestInitCompletion(t *testing.T) {
	}
}

func TestReadOrGenerateVPNKey(t *testing.T) {
	assert := assert.New(t)
	require := require.New(t)

	testKey := []byte(base64.StdEncoding.EncodeToString([]byte("32bytesWireGuardKeyForTheTesting")))
	fileHandler := file.NewHandler(afero.NewMemMapFs())
	require.NoError(fileHandler.Write("testKey", testKey, file.OptNone))

	privK, pubK, err := readOrGenerateVPNKey(fileHandler, "testKey")
	assert.NoError(err)
	assert.Equal(testKey, privK)
	assert.NotEmpty(pubK)

	// no path provided
	privK, pubK, err = readOrGenerateVPNKey(fileHandler, "")
	assert.NoError(err)
	assert.NotEmpty(privK)
	assert.NotEmpty(pubK)
}

func TestReadOrGenerateMasterSecret(t *testing.T) {
func TestReadOrGeneratedMasterSecret(t *testing.T) {
	testCases := map[string]struct {
		filename string
		filecontent string
@@ -523,157 +355,16 @@ func TestReadOrGenerateMasterSecret(t *testing.T) {
	}
}

func TestAutoscaleFlag(t *testing.T) {
	testKey := base64.StdEncoding.EncodeToString([]byte("32bytesWireGuardKeyForTheTesting"))
	testGcpState := state.ConstellationState{
		CloudProvider: "gcp",
		GCPNodes: cloudtypes.Instances{
			"id-0": {PrivateIP: "192.0.2.1", PublicIP: "192.0.2.1"},
			"id-1": {PrivateIP: "192.0.2.1", PublicIP: "192.0.2.1"},
		},
		GCPCoordinators: cloudtypes.Instances{
			"id-c": {PrivateIP: "192.0.2.1", PublicIP: "192.0.2.1"},
		},
	}
	testAzureState := state.ConstellationState{
		CloudProvider: "azure",
		AzureNodes: cloudtypes.Instances{
			"id-0": {PrivateIP: "192.0.2.1", PublicIP: "192.0.2.1"},
			"id-1": {PrivateIP: "192.0.2.1", PublicIP: "192.0.2.1"},
		},
		AzureCoordinators: cloudtypes.Instances{
			"id-c": {PrivateIP: "192.0.2.1", PublicIP: "192.0.2.1"},
		},
		AzureResourceGroup: "test",
	}
	testActivationResps := []fakeActivationRespMessage{
		{log: "testlog1"},
		{log: "testlog2"},
		{
			kubeconfig: "kubeconfig",
			clientVpnIp: "192.0.2.2",
			coordinatorVpnKey: testKey,
			ownerID: "ownerID",
			clusterID: "clusterID",
		},
		{log: "testlog3"},
	}
type stubInitServer struct {
	initResp *initproto.InitResponse
	initErr error

	testCases := map[string]struct {
		autoscaleFlag bool
		existingState state.ConstellationState
		client *stubProtoClient
		serviceAccountCreator stubServiceAccountCreator
		waiter statusWaiter
		privKey string
	}{
		"initialize some gcp instances without autoscale flag": {
			autoscaleFlag: false,
			existingState: testGcpState,
			client: &stubProtoClient{
				respClient: &fakeActivationRespClient{responses: testActivationResps},
			},
			waiter: &stubStatusWaiter{},
			privKey: testKey,
		},
		"initialize some azure instances without autoscale flag": {
			autoscaleFlag: false,
			existingState: testAzureState,
			client: &stubProtoClient{
				respClient: &fakeActivationRespClient{responses: testActivationResps},
			},
			waiter: &stubStatusWaiter{},
			privKey: testKey,
		},
		"initialize some gcp instances with autoscale flag": {
			autoscaleFlag: true,
			existingState: testGcpState,
			client: &stubProtoClient{
				respClient: &fakeActivationRespClient{responses: testActivationResps},
			},
			waiter: &stubStatusWaiter{},
			privKey: testKey,
		},
		"initialize some azure instances with autoscale flag": {
			autoscaleFlag: true,
			existingState: testAzureState,
			client: &stubProtoClient{
				respClient: &fakeActivationRespClient{responses: testActivationResps},
			},
			waiter: &stubStatusWaiter{},
			privKey: testKey,
		},
	}
	activateAutoscalingNodeGroups []string

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)
			require := require.New(t)

			cmd := NewInitCmd()
			var out bytes.Buffer
			cmd.SetOut(&out)
			var errOut bytes.Buffer
			cmd.SetErr(&errOut)
			cmd.Flags().String("config", "", "") // register persistent flag manually
			fs := afero.NewMemMapFs()
			fileHandler := file.NewHandler(fs)
			vpnHandler := stubVPNHandler{}
			require.NoError(fileHandler.WriteJSON(constants.StateFilename, tc.existingState, file.OptNone))

			// Write key file to filesystem and set path in flag.
			require.NoError(afero.Afero{Fs: fs}.WriteFile("privK", []byte(tc.privKey), 0o600))
			require.NoError(cmd.Flags().Set("privatekey", "privK"))

			require.NoError(cmd.Flags().Set("autoscale", strconv.FormatBool(tc.autoscaleFlag)))

			require.NoError(initialize(cmd, tc.client, &tc.serviceAccountCreator, fileHandler, tc.waiter, &vpnHandler))
			if tc.autoscaleFlag {
				assert.Len(tc.client.activateAutoscalingNodeGroups, 1)
			} else {
				assert.Len(tc.client.activateAutoscalingNodeGroups, 0)
			}
		})
	}
	initproto.UnimplementedAPIServer
}

func TestWriteWGQuickFile(t *testing.T) {
	testCases := map[string]struct {
		fileHandler file.Handler
		vpnHandler *stubVPNHandler
		vpnConfig *wgquick.Config
		wantErr bool
	}{
		"write wg quick file": {
			fileHandler: file.NewHandler(afero.NewMemMapFs()),
			vpnHandler: &stubVPNHandler{marshalRes: "config"},
		},
		"marshal failed": {
			fileHandler: file.NewHandler(afero.NewMemMapFs()),
			vpnHandler: &stubVPNHandler{marshalErr: errors.New("some err")},
			wantErr: true,
		},
		"write fails": {
			fileHandler: file.NewHandler(afero.NewReadOnlyFs(afero.NewMemMapFs())),
			vpnHandler: &stubVPNHandler{marshalRes: "config"},
			wantErr: true,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)

			err := writeWGQuickFile(tc.fileHandler, tc.vpnHandler, tc.vpnConfig)

			if tc.wantErr {
				assert.Error(err)
			} else {
				assert.NoError(err)
				file, err := tc.fileHandler.Read(constants.WGQuickConfigFilename)
				assert.NoError(err)
				assert.Contains(string(file), tc.vpnHandler.marshalRes)
			}
		})
	}
func (s *stubInitServer) Init(ctx context.Context, req *initproto.InitRequest) (*initproto.InitResponse, error) {
	s.activateAutoscalingNodeGroups = req.AutoscalingNodeGroups
	return s.initResp, s.initErr
}
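Each table-driven case above supplies its own stub, so a failure path only needs to set initErr. A hypothetical case in the same shape as the commented-out ones:

	// "init call fails": {
	// 	existingState: testGcpState,
	// 	initServerAPI: &stubInitServer{initErr: errors.New("some error")},
	// 	wantErr:       true,
	// },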

@@ -1,17 +0,0 @@
package cmd

import (
	"context"

	"github.com/edgelesssys/constellation/cli/internal/proto"
	"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
	"github.com/edgelesssys/constellation/coordinator/state"
	"github.com/edgelesssys/constellation/internal/atls"
)

type protoClient interface {
	Connect(endpoint string, validators []atls.Validator) error
	Close() error
	GetState(ctx context.Context) (state.State, error)
	Activate(ctx context.Context, userPublicKey, masterSecret []byte, nodeIPs, coordinatorIPs, autoscalingNodeGroups []string, cloudServiceAccountURI string, sshUsers []*pubproto.SSHUserKey) (proto.ActivationResponseClient, error)
}
@@ -1,225 +0,0 @@
package cmd

import (
	"context"
	"errors"
	"fmt"
	"io"

	"github.com/edgelesssys/constellation/cli/internal/proto"
	"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
	"github.com/edgelesssys/constellation/coordinator/state"
	"github.com/edgelesssys/constellation/internal/atls"
)

type stubProtoClient struct {
	conn bool
	respClient proto.ActivationResponseClient
	connectErr error
	closeErr error
	getStateErr error
	activateErr error

	getStateState state.State
	activateUserPublicKey []byte
	activateMasterSecret []byte
	activateNodeIPs []string
	activateCoordinatorIPs []string
	activateAutoscalingNodeGroups []string
	cloudServiceAccountURI string
	sshUserKeys []*pubproto.SSHUserKey
}

func (c *stubProtoClient) Connect(_ string, _ []atls.Validator) error {
	c.conn = true
	return c.connectErr
}

func (c *stubProtoClient) Close() error {
	c.conn = false
	return c.closeErr
}

func (c *stubProtoClient) GetState(_ context.Context) (state.State, error) {
	return c.getStateState, c.getStateErr
}

func (c *stubProtoClient) Activate(ctx context.Context, userPublicKey, masterSecret []byte, nodeIPs, coordinatorIPs []string, autoscalingNodeGroups []string, cloudServiceAccountURI string, sshUserKeys []*pubproto.SSHUserKey) (proto.ActivationResponseClient, error) {
	c.activateUserPublicKey = userPublicKey
	c.activateMasterSecret = masterSecret
	c.activateNodeIPs = nodeIPs
	c.activateCoordinatorIPs = coordinatorIPs
	c.activateAutoscalingNodeGroups = autoscalingNodeGroups
	c.cloudServiceAccountURI = cloudServiceAccountURI
	c.sshUserKeys = sshUserKeys

	return c.respClient, c.activateErr
}

func (c *stubProtoClient) ActivateAdditionalCoordinators(ctx context.Context, ips []string) error {
	return c.activateErr
}

type stubActivationRespClient struct {
	nextLogErr *error
	getKubeconfigErr error
	getCoordinatorVpnKeyErr error
	getClientVpnIpErr error
	getOwnerIDErr error
	getClusterIDErr error
	writeLogStreamErr error
}

func (s *stubActivationRespClient) NextLog() (string, error) {
	if s.nextLogErr == nil {
		return "", io.EOF
	}
	return "", *s.nextLogErr
}

func (s *stubActivationRespClient) WriteLogStream(io.Writer) error {
	return s.writeLogStreamErr
}

func (s *stubActivationRespClient) GetKubeconfig() (string, error) {
	return "", s.getKubeconfigErr
}

func (s *stubActivationRespClient) GetCoordinatorVpnKey() (string, error) {
	return "", s.getCoordinatorVpnKeyErr
}

func (s *stubActivationRespClient) GetClientVpnIp() (string, error) {
	return "", s.getClientVpnIpErr
}

func (s *stubActivationRespClient) GetOwnerID() (string, error) {
	return "", s.getOwnerIDErr
}

func (s *stubActivationRespClient) GetClusterID() (string, error) {
	return "", s.getClusterIDErr
}

type fakeProtoClient struct {
	conn bool
	respClient proto.ActivationResponseClient
}

func (c *fakeProtoClient) Connect(endpoint string, validators []atls.Validator) error {
	if endpoint == "" {
		return errors.New("endpoint is empty")
	}
	if len(validators) == 0 {
		return errors.New("validators is empty")
	}
	c.conn = true
	return nil
}

func (c *fakeProtoClient) Close() error {
	c.conn = false
	return nil
}

func (c *fakeProtoClient) GetState(_ context.Context) (state.State, error) {
	if !c.conn {
		return state.Uninitialized, errors.New("client is not connected")
	}
	return state.IsNode, nil
}

func (c *fakeProtoClient) Activate(ctx context.Context, userPublicKey, masterSecret []byte, nodeIPs, coordinatorIPs, autoscalingNodeGroups []string, cloudServiceAccountURI string, sshUserKeys []*pubproto.SSHUserKey) (proto.ActivationResponseClient, error) {
	if !c.conn {
		return nil, errors.New("client is not connected")
	}
	return c.respClient, nil
}

func (c *fakeProtoClient) ActivateAdditionalCoordinators(ctx context.Context, ips []string) error {
	if !c.conn {
		return errors.New("client is not connected")
	}
	return nil
}

type fakeActivationRespClient struct {
	responses []fakeActivationRespMessage
	kubeconfig string
	coordinatorVpnKey string
	clientVpnIp string
	ownerID string
	clusterID string
}

func (c *fakeActivationRespClient) NextLog() (string, error) {
	for len(c.responses) > 0 {
		resp := c.responses[0]
		c.responses = c.responses[1:]
		if len(resp.log) > 0 {
			return resp.log, nil
		}
		c.kubeconfig = resp.kubeconfig
		c.coordinatorVpnKey = resp.coordinatorVpnKey
		c.clientVpnIp = resp.clientVpnIp
		c.ownerID = resp.ownerID
		c.clusterID = resp.clusterID
	}
	return "", io.EOF
}

func (c *fakeActivationRespClient) WriteLogStream(w io.Writer) error {
	log, err := c.NextLog()
	for err == nil {
		fmt.Fprint(w, log)
		log, err = c.NextLog()
	}
	if !errors.Is(err, io.EOF) {
		return err
	}
	return nil
}

func (c *fakeActivationRespClient) GetKubeconfig() (string, error) {
	if c.kubeconfig == "" {
		return "", errors.New("kubeconfig is empty")
	}
	return c.kubeconfig, nil
}

func (c *fakeActivationRespClient) GetCoordinatorVpnKey() (string, error) {
	if c.coordinatorVpnKey == "" {
		return "", errors.New("control-plane public VPN key is empty")
	}
	return c.coordinatorVpnKey, nil
}

func (c *fakeActivationRespClient) GetClientVpnIp() (string, error) {
	if c.clientVpnIp == "" {
		return "", errors.New("client VPN IP is empty")
	}
	return c.clientVpnIp, nil
}

func (c *fakeActivationRespClient) GetOwnerID() (string, error) {
	if c.ownerID == "" {
		return "", errors.New("init secret is empty")
	}
	return c.ownerID, nil
}

func (c *fakeActivationRespClient) GetClusterID() (string, error) {
	if c.clusterID == "" {
		return "", errors.New("cluster identifier is empty")
	}
	return c.clusterID, nil
}

type fakeActivationRespMessage struct {
	log string
	kubeconfig string
	coordinatorVpnKey string
	clientVpnIp string
	ownerID string
	clusterID string
}
@@ -1,13 +0,0 @@
package cmd

import (
	"context"

	"github.com/edgelesssys/constellation/coordinator/state"
	"github.com/edgelesssys/constellation/internal/atls"
)

type statusWaiter interface {
	InitializeValidators([]atls.Validator) error
	WaitForAll(ctx context.Context, endpoints []string, status ...state.State) error
}
@@ -1,27 +0,0 @@
package cmd

import (
	"context"
	"errors"

	"github.com/edgelesssys/constellation/coordinator/state"
	"github.com/edgelesssys/constellation/internal/atls"
)

type stubStatusWaiter struct {
	initialized bool
	initializeErr error
	waitForAllErr error
}

func (s *stubStatusWaiter) InitializeValidators([]atls.Validator) error {
	s.initialized = true
	return s.initializeErr
}

func (s *stubStatusWaiter) WaitForAll(ctx context.Context, endpoints []string, status ...state.State) error {
	if !s.initialized {
		return errors.New("waiter not initialized")
	}
	return s.waitForAllErr
}
@@ -1,145 +0,0 @@
package proto

import (
	"context"
	"errors"
	"io"

	"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
	"github.com/edgelesssys/constellation/coordinator/state"
	"github.com/edgelesssys/constellation/internal/atls"
	"github.com/edgelesssys/constellation/internal/grpc/atlscredentials"
	kms "github.com/edgelesssys/constellation/kms/setup"
	"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
	"google.golang.org/grpc"
)

// Client wraps a PubAPI client and the connection to it.
type Client struct {
	conn *grpc.ClientConn
	pubapi pubproto.APIClient
}

// Connect connects the client to a given server, using the handed
// Validators for the attestation of the connection.
// The connection must be closed using Close(). If connect is
// called on a client that already has a connection, the old
// connection is closed.
func (c *Client) Connect(endpoint string, validators []atls.Validator) error {
	creds := atlscredentials.New(nil, validators)

	conn, err := grpc.Dial(endpoint, grpc.WithTransportCredentials(creds))
	if err != nil {
		return err
	}
	if c.conn != nil {
		c.conn.Close()
	}
	c.conn = conn
	c.pubapi = pubproto.NewAPIClient(conn)
	return nil
}

// Close closes the grpc connection of the client.
// Close is idempotent and can be called on non connected clients
// without returning an error.
func (c *Client) Close() error {
	if c.conn == nil {
		return nil
	}
	if err := c.conn.Close(); err != nil {
		return err
	}
	c.conn = nil
	return nil
}

// GetState returns the state of the connected server.
func (c *Client) GetState(ctx context.Context) (state.State, error) {
	if c.pubapi == nil {
		return state.Uninitialized, errors.New("client is not connected")
	}

	resp, err := c.pubapi.GetState(ctx, &pubproto.GetStateRequest{})
	if err != nil {
		return state.Uninitialized, err
	}
	return state.State(resp.State), nil
}

// Activate activates the Constellation coordinator via a grpc call.
// The handed IP addresses must be the private IP addresses of running AWS or GCP instances,
// and the userPublicKey is the VPN key of the user's WireGuard interface.
func (c *Client) Activate(ctx context.Context, userPublicKey, masterSecret []byte, nodeIPs, coordinatorIPs, autoscalingNodeGroups []string, cloudServiceAccountURI string, sshUserKeys []*pubproto.SSHUserKey) (ActivationResponseClient, error) {
	if c.pubapi == nil {
		return nil, errors.New("client is not connected")
	}
	if len(userPublicKey) == 0 {
		return nil, errors.New("parameter userPublicKey is empty")
	}
	if len(nodeIPs) == 0 {
		return nil, errors.New("parameter ips is empty")
	}

	pubKey, err := wgtypes.ParseKey(string(userPublicKey))
	if err != nil {
		return nil, err
	}

	req := &pubproto.ActivateAsCoordinatorRequest{
		AdminVpnPubKey: pubKey[:],
		NodePublicIps: nodeIPs,
		CoordinatorPublicIps: coordinatorIPs,
		AutoscalingNodeGroups: autoscalingNodeGroups,
		MasterSecret: masterSecret,
		KmsUri: kms.ClusterKMSURI,
		StorageUri: kms.NoStoreURI,
		KeyEncryptionKeyId: "",
		UseExistingKek: false,
		CloudServiceAccountUri: cloudServiceAccountURI,
		SshUserKeys: sshUserKeys,
	}

	client, err := c.pubapi.ActivateAsCoordinator(ctx, req)
	if err != nil {
		return nil, err
	}
	return NewActivationRespClient(client), nil
}

// ActivationResponseClient has methods to read messages from a stream of
// ActivateAsCoordinatorResponses.
type ActivationResponseClient interface {
	// NextLog reads responses from the response stream and returns the
	// first received log.
	// If AdminConfig responses are received before the first log response
	// occurs, the state of the client is updated with those configs. An
	// io.EOF error is returned at the end of the stream.
	NextLog() (string, error)

	// WriteLogStream reads responses from the response stream and
	// writes log responses to the handed writer.
	// Occurring AdminConfig responses update the state of the client.
	WriteLogStream(io.Writer) error

	// GetKubeconfig returns the kubeconfig that was received in the
	// latest AdminConfig response or an error if the field is empty.
	GetKubeconfig() (string, error)

	// GetCoordinatorVpnKey returns the Coordinator's VPN key that was
	// received in the latest AdminConfig response or an error if the field
	// is empty.
	GetCoordinatorVpnKey() (string, error)

	// GetClientVpnIp returns the client VPN IP that was received
	// in the latest AdminConfig response or an error if the field is empty.
	GetClientVpnIp() (string, error)

	// GetOwnerID returns the owner identifier, derived from the client's master secret
	// or an error if the field is empty.
	GetOwnerID() (string, error)

	// GetClusterID returns the cluster's unique identifier
	// or an error if the field is empty.
	GetClusterID() (string, error)
}
@ -1,220 +0,0 @@
|
||||
package proto
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"net"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
|
||||
"github.com/edgelesssys/constellation/coordinator/state"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/goleak"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/connectivity"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
"google.golang.org/grpc/test/bufconn"
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
goleak.VerifyTestMain(m,
|
||||
// https://github.com/census-instrumentation/opencensus-go/issues/1262
|
||||
goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"),
|
||||
)
|
||||
}
|
||||
|
||||
func TestClose(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
require := require.New(t)
|
||||
|
||||
client := Client{}
|
||||
|
||||
// Create a connection.
|
||||
listener := bufconn.Listen(4)
|
||||
defer listener.Close()
|
||||
ctx := context.Background()
|
||||
conn, err := grpc.DialContext(ctx, "", grpc.WithContextDialer(func(context.Context, string) (net.Conn, error) {
|
||||
return listener.Dial()
|
||||
}), grpc.WithTransportCredentials(insecure.NewCredentials()))
|
||||
require.NoError(err)
|
||||
defer conn.Close()
|
||||
|
||||
// Wait for connection to reach 'connecting' state.
|
||||
// Connection is not yet usable in this state, but we just need
|
||||
// any stable non 'shutdown' state to validate that the state
|
||||
// previous to calling close isn't already 'shutdown'.
|
||||
err = func() error {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
|
||||
defer cancel()
|
||||
for {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
if connectivity.Connecting == conn.GetState() {
|
||||
return nil
|
||||
}
|
||||
time.Sleep(5 * time.Millisecond)
|
||||
}
|
||||
}()
|
||||
require.NoError(err)
|
||||
|
||||
client.conn = conn
|
||||
|
||||
// Close connection.
|
||||
assert.NoError(client.Close())
|
||||
assert.Empty(client.conn)
|
||||
assert.Equal(connectivity.Shutdown, conn.GetState())
|
||||
|
||||
// Close closed connection.
|
||||
assert.NoError(client.Close())
|
||||
assert.Empty(client.conn)
|
||||
assert.Equal(connectivity.Shutdown, conn.GetState())
|
||||
}

func TestGetState(t *testing.T) {
	someErr := errors.New("some error")

	testCases := map[string]struct {
		pubAPIClient pubproto.APIClient
		wantErr      bool
		wantState    state.State
	}{
		"success": {
			pubAPIClient: &stubPubAPIClient{getStateState: state.IsNode},
			wantState:    state.IsNode,
		},
		"getState error": {
			pubAPIClient: &stubPubAPIClient{getStateErr: someErr},
			wantErr:      true,
		},
		"uninitialized": {
			wantErr: true,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)

			client := Client{}
			if tc.pubAPIClient != nil {
				client.pubapi = tc.pubAPIClient
			}

			state, err := client.GetState(context.Background())

			if tc.wantErr {
				assert.Error(err)
			} else {
				assert.NoError(err)
				assert.Equal(tc.wantState, state)
			}
		})
	}
}

func TestActivate(t *testing.T) {
	testKey := base64.StdEncoding.EncodeToString([]byte("32bytesWireGuardKeyForTheTesting"))
	someErr := errors.New("failed")

	testCases := map[string]struct {
		pubAPIClient  *stubPubAPIClient
		userPublicKey string
		ips           []string
		wantErr       bool
	}{
		"normal activation": {
			pubAPIClient:  &stubPubAPIClient{},
			userPublicKey: testKey,
			ips:           []string{"192.0.2.1", "192.0.2.1", "192.0.2.1"},
			wantErr:       false,
		},
		"client without pubAPIClient": {
			userPublicKey: testKey,
			ips:           []string{"192.0.2.1", "192.0.2.1", "192.0.2.1"},
			wantErr:       true,
		},
		"empty public key parameter": {
			pubAPIClient:  &stubPubAPIClient{},
			userPublicKey: "",
			ips:           []string{"192.0.2.1", "192.0.2.1", "192.0.2.1"},
			wantErr:       true,
		},
		"invalid public key parameter": {
			pubAPIClient:  &stubPubAPIClient{},
			userPublicKey: "invalid Key",
			ips:           []string{"192.0.2.1", "192.0.2.1", "192.0.2.1"},
			wantErr:       true,
		},
		"empty ips parameter": {
			pubAPIClient:  &stubPubAPIClient{},
			userPublicKey: testKey,
			ips:           []string{},
			wantErr:       true,
		},
		"fail ActivateAsCoordinator": {
			pubAPIClient:  &stubPubAPIClient{activateAsCoordinatorErr: someErr},
			userPublicKey: testKey,
			ips:           []string{"192.0.2.1", "192.0.2.1", "192.0.2.1"},
			wantErr:       true,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)

			client := Client{}
			if tc.pubAPIClient != nil {
				client.pubapi = tc.pubAPIClient
			}
			_, err := client.Activate(context.Background(), []byte(tc.userPublicKey), []byte("Constellation"), tc.ips, nil, nil, "serviceaccount://test", nil)
			if tc.wantErr {
				assert.Error(err)
			} else {
				assert.NoError(err)
				assert.Equal("32bytesWireGuardKeyForTheTesting", string(tc.pubAPIClient.activateAsCoordinatorReqKey))
				assert.Equal(tc.ips, tc.pubAPIClient.activateAsCoordinatorReqIPs)
				assert.Equal("Constellation", string(tc.pubAPIClient.activateAsCoordinatorMasterSecret))
				assert.Equal("serviceaccount://test", tc.pubAPIClient.activateCloudServiceAccountURI)
			}
		})
	}
}

type stubPubAPIClient struct {
	getStateState                     state.State
	getStateErr                       error
	activateAsCoordinatorErr          error
	activateAdditionalNodesErr        error
	activateAsCoordinatorReqKey       []byte
	activateAsCoordinatorReqIPs       []string
	activateAsCoordinatorMasterSecret []byte
	activateAdditionalNodesReqIPs     []string
	activateCloudServiceAccountURI    string
	pubproto.APIClient
}

func (s *stubPubAPIClient) GetState(ctx context.Context, in *pubproto.GetStateRequest, opts ...grpc.CallOption) (*pubproto.GetStateResponse, error) {
	return &pubproto.GetStateResponse{State: uint32(s.getStateState)}, s.getStateErr
}

func (s *stubPubAPIClient) ActivateAsCoordinator(ctx context.Context, in *pubproto.ActivateAsCoordinatorRequest,
	opts ...grpc.CallOption,
) (pubproto.API_ActivateAsCoordinatorClient, error) {
	s.activateAsCoordinatorReqKey = in.AdminVpnPubKey
	s.activateAsCoordinatorReqIPs = in.NodePublicIps
	s.activateAsCoordinatorMasterSecret = in.MasterSecret
	s.activateCloudServiceAccountURI = in.CloudServiceAccountUri
	return dummyActivateAsCoordinatorClient{}, s.activateAsCoordinatorErr
}

func (s *stubPubAPIClient) ActivateAdditionalNodes(ctx context.Context, in *pubproto.ActivateAdditionalNodesRequest,
	opts ...grpc.CallOption,
) (pubproto.API_ActivateAdditionalNodesClient, error) {
	s.activateAdditionalNodesReqIPs = in.NodePublicIps
	return dummyActivateAdditionalNodesClient{}, s.activateAdditionalNodesErr
}
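
// The stub above records the request fields it receives (admin VPN public key,
// node IPs, master secret, and cloud service account URI) so TestActivate can
// assert on them afterwards; the embedded pubproto.APIClient satisfies the
// remainder of the interface without implementing it.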
@ -1,117 +0,0 @@
package proto

import (
	"encoding/base64"
	"errors"
	"fmt"
	"io"

	"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
	"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
)

// ActivationRespClient has methods to read messages from a stream of
// ActivateAsCoordinatorResponses. It wraps an API_ActivateAsCoordinatorClient.
type ActivationRespClient struct {
	client            pubproto.API_ActivateAsCoordinatorClient
	kubeconfig        string
	coordinatorVpnKey string
	clientVpnIp       string
	ownerID           string
	clusterID         string
}

// NewActivationRespClient creates a new ActivationRespClient that wraps the
// given API_ActivateAsCoordinatorClient.
func NewActivationRespClient(client pubproto.API_ActivateAsCoordinatorClient) *ActivationRespClient {
	return &ActivationRespClient{
		client: client,
	}
}

// NextLog reads responses from the response stream and returns the
// first received log.
func (a *ActivationRespClient) NextLog() (string, error) {
	for {
		resp, err := a.client.Recv()
		if err != nil {
			return "", err
		}
		switch x := resp.Content.(type) {
		case *pubproto.ActivateAsCoordinatorResponse_Log:
			return x.Log.Message, nil
		case *pubproto.ActivateAsCoordinatorResponse_AdminConfig:
			config := x.AdminConfig
			a.kubeconfig = string(config.Kubeconfig)

			coordinatorVpnKey, err := wgtypes.NewKey(config.CoordinatorVpnPubKey)
			if err != nil {
				return "", err
			}
			a.coordinatorVpnKey = coordinatorVpnKey.String()
			a.clientVpnIp = config.AdminVpnIp
			a.ownerID = base64.StdEncoding.EncodeToString(config.OwnerId)
			a.clusterID = base64.StdEncoding.EncodeToString(config.ClusterId)
		}
	}
}

// WriteLogStream reads responses from the response stream and
// writes log responses to the given writer.
func (a *ActivationRespClient) WriteLogStream(w io.Writer) error {
	log, err := a.NextLog()
	for err == nil {
		fmt.Fprintln(w, log)
		log, err = a.NextLog()
	}
	if !errors.Is(err, io.EOF) {
		return err
	}
	return nil
}

// GetKubeconfig returns the kubeconfig that was received in the
// latest AdminConfig response or an error if the field is empty.
func (a *ActivationRespClient) GetKubeconfig() (string, error) {
	if a.kubeconfig == "" {
		return "", errors.New("kubeconfig is empty")
	}
	return a.kubeconfig, nil
}

// GetCoordinatorVpnKey returns the Coordinator's VPN key that was
// received in the latest AdminConfig response or an error if the field
// is empty.
func (a *ActivationRespClient) GetCoordinatorVpnKey() (string, error) {
	if a.coordinatorVpnKey == "" {
		return "", errors.New("coordinator public VPN key is empty")
	}
	return a.coordinatorVpnKey, nil
}

// GetClientVpnIp returns the client VPN IP that was received
// in the latest AdminConfig response or an error if the field is empty.
func (a *ActivationRespClient) GetClientVpnIp() (string, error) {
	if a.clientVpnIp == "" {
		return "", errors.New("client VPN IP is empty")
	}
	return a.clientVpnIp, nil
}

// GetOwnerID returns the owner identifier, derived from the client's master secret,
// or an error if the field is empty.
func (a *ActivationRespClient) GetOwnerID() (string, error) {
	if a.ownerID == "" {
		return "", errors.New("secret identifier is empty")
	}
	return a.ownerID, nil
}

// GetClusterID returns the cluster's unique identifier
// or an error if the field is empty.
func (a *ActivationRespClient) GetClusterID() (string, error) {
	if a.clusterID == "" {
		return "", errors.New("cluster identifier is empty")
	}
	return a.clusterID, nil
}
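
// Typical usage (a minimal sketch, assuming a stream obtained from
// pubproto.APIClient.ActivateAsCoordinator):
//
//	respClient := proto.NewActivationRespClient(stream)
//	if err := respClient.WriteLogStream(os.Stdout); err != nil {
//		return err
//	}
//	kubeconfig, err := respClient.GetKubeconfig()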
@ -1,241 +0,0 @@
package proto

import (
	"bytes"
	"errors"
	"io"
	"testing"

	"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
	"github.com/stretchr/testify/assert"
	"google.golang.org/grpc"
)

// dummyActivateAsCoordinatorClient is a dummy and panics if Recv() is called.
type dummyActivateAsCoordinatorClient struct {
	grpc.ClientStream
}

func (c dummyActivateAsCoordinatorClient) Recv() (*pubproto.ActivateAsCoordinatorResponse, error) {
	panic("i'm a dummy, Recv() not implemented")
}

// dummyActivateAdditionalNodesClient is a dummy and panics if Recv() is called.
type dummyActivateAdditionalNodesClient struct {
	grpc.ClientStream
}

func (c dummyActivateAdditionalNodesClient) Recv() (*pubproto.ActivateAdditionalNodesResponse, error) {
	panic("i'm a dummy, Recv() not implemented")
}

// stubActivationAsCoordinatorClient receives responses from a predefined
// response stream iterator or returns a stub error.
type stubActivationAsCoordinatorClient struct {
	grpc.ClientStream

	stream  *stubActivateAsCoordinatorResponseIter
	recvErr error
}

func (c stubActivationAsCoordinatorClient) Recv() (*pubproto.ActivateAsCoordinatorResponse, error) {
	if c.recvErr != nil {
		return nil, c.recvErr
	}
	return c.stream.Next()
}

// stubActivateAsCoordinatorResponseIter is an iterator over a slice of
// ActivateAsCoordinatorResponses. It returns the messages in the order
// they occur in the slice and returns an io.EOF error when no response
// is left.
type stubActivateAsCoordinatorResponseIter struct {
	msgs []*pubproto.ActivateAsCoordinatorResponse
}

// Next returns the next message from the message slice or an io.EOF error
// if the message slice is empty.
func (q *stubActivateAsCoordinatorResponseIter) Next() (*pubproto.ActivateAsCoordinatorResponse, error) {
	if len(q.msgs) == 0 {
		return nil, io.EOF
	}
	msg := q.msgs[0]
	q.msgs = q.msgs[1:]
	return msg, nil
}
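
// Returning io.EOF once the slice is drained mirrors how a real gRPC client
// stream signals a cleanly closed stream from Recv, so NextLog's termination
// path is exercised the same way in tests as against a live server.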

func TestNextLog(t *testing.T) {
	testClientVpnIp := "192.0.2.1"
	testCoordinatorVpnKey := []byte("32bytesWireGuardKeyForTheTesting")
	testCoordinatorVpnKey64 := []byte("MzJieXRlc1dpcmVHdWFyZEtleUZvclRoZVRlc3Rpbmc=")
	testKubeconfig := []byte("apiVersion:v1 kind:Config...")
	testConfigResp := &pubproto.ActivateAsCoordinatorResponse{
		Content: &pubproto.ActivateAsCoordinatorResponse_AdminConfig{
			AdminConfig: &pubproto.AdminConfig{
				AdminVpnIp:           testClientVpnIp,
				CoordinatorVpnPubKey: testCoordinatorVpnKey,
				Kubeconfig:           testKubeconfig,
			},
		},
	}
	testLogMessage := "some log message"
	testLogResp := &pubproto.ActivateAsCoordinatorResponse{
		Content: &pubproto.ActivateAsCoordinatorResponse_Log{
			Log: &pubproto.Log{
				Message: testLogMessage,
			},
		},
	}
	someErr := errors.New("failed")

	testCases := map[string]struct {
		msgs       []*pubproto.ActivateAsCoordinatorResponse
		wantLogLen int
		wantState  bool
		recvErr    error
		wantErr    bool
	}{
		"some logs": {
			msgs:       []*pubproto.ActivateAsCoordinatorResponse{testLogResp, testLogResp, testLogResp},
			wantLogLen: 3,
		},
		"only admin config": {
			msgs:      []*pubproto.ActivateAsCoordinatorResponse{testConfigResp},
			wantState: true,
		},
		"logs and configs": {
			msgs:       []*pubproto.ActivateAsCoordinatorResponse{testLogResp, testConfigResp, testLogResp, testConfigResp},
			wantLogLen: 2,
			wantState:  true,
		},
		"no response": {
			msgs:       []*pubproto.ActivateAsCoordinatorResponse{},
			wantLogLen: 0,
		},
		"recv fail": {
			recvErr: someErr,
			wantErr: true,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)

			respClient := stubActivationAsCoordinatorClient{
				stream: &stubActivateAsCoordinatorResponseIter{
					msgs: tc.msgs,
				},
				recvErr: tc.recvErr,
			}
			client := NewActivationRespClient(respClient)

			var logs []string
			var err error
			for err == nil {
				var log string
				log, err = client.NextLog()
				if err == nil {
					logs = append(logs, log)
				}
			}

			assert.Error(err)
			if tc.wantErr {
				assert.NotErrorIs(err, io.EOF)
				return
			}

			assert.ErrorIs(err, io.EOF)
			assert.Len(logs, tc.wantLogLen)

			if tc.wantState {
				ip, err := client.GetClientVpnIp()
				assert.NoError(err)
				assert.Equal(testClientVpnIp, ip)
				config, err := client.GetKubeconfig()
				assert.NoError(err)
				assert.Equal(string(testKubeconfig), config)
				key, err := client.GetCoordinatorVpnKey()
				assert.NoError(err)
				assert.Equal(string(testCoordinatorVpnKey64), key)
			}
		})
	}
}

func TestPrintLogStream(t *testing.T) {
	assert := assert.New(t)

	//
	// 10 logs of 10 bytes each.
	//
	var msgs []*pubproto.ActivateAsCoordinatorResponse
	for i := 0; i < 10; i++ {
		msgs = append(msgs, &pubproto.ActivateAsCoordinatorResponse{
			Content: &pubproto.ActivateAsCoordinatorResponse_Log{
				Log: &pubproto.Log{
					Message: "10BytesLog",
				},
			},
		})
	}
	respClient := stubActivationAsCoordinatorClient{
		stream: &stubActivateAsCoordinatorResponseIter{
			msgs: msgs,
		},
	}
	client := NewActivationRespClient(respClient)
	out := &bytes.Buffer{}
	assert.NoError(client.WriteLogStream(out))
	assert.Equal(out.Len(), 10*11) // 10 messages * (len(message) + 1 newline)

	//
	// Check error handling.
	//
	someErr := errors.New("failed")
	respClient = stubActivationAsCoordinatorClient{
		recvErr: someErr,
	}
	client = NewActivationRespClient(respClient)
	assert.Error(client.WriteLogStream(&bytes.Buffer{}))
}

func TestGetKubeconfig(t *testing.T) {
	assert := assert.New(t)

	client := NewActivationRespClient(dummyActivateAsCoordinatorClient{})
	_, err := client.GetKubeconfig()
	assert.Error(err)

	client.kubeconfig = "apiVersion:v1 kind:Config..."
	config, err := client.GetKubeconfig()
	assert.NoError(err)
	assert.Equal("apiVersion:v1 kind:Config...", config)
}

func TestGetCoordinatorVpnKey(t *testing.T) {
	assert := assert.New(t)

	client := NewActivationRespClient(dummyActivateAsCoordinatorClient{})
	_, err := client.GetCoordinatorVpnKey()
	assert.Error(err)

	client.coordinatorVpnKey = "32bytesWireGuardKeyForTheTesting"
	key, err := client.GetCoordinatorVpnKey()
	assert.NoError(err)
	assert.Equal("32bytesWireGuardKeyForTheTesting", key)
}

func TestGetClientVpnIp(t *testing.T) {
	assert := assert.New(t)

	client := NewActivationRespClient(dummyActivateAsCoordinatorClient{})
	_, err := client.GetClientVpnIp()
	assert.Error(err)

	client.clientVpnIp = "192.0.2.1"
	ip, err := client.GetClientVpnIp()
	assert.NoError(err)
	assert.Equal("192.0.2.1", ip)
}
@ -1,101 +0,0 @@
package vpn

import (
	"fmt"
	"net"
	"time"

	wgquick "github.com/nmiculinic/wg-quick-go"
	"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
)

const (
	interfaceName = "wg0"
	wireguardPort = 51820
)

type ConfigHandler struct {
	up func(cfg *wgquick.Config, iface string) error
}

func NewConfigHandler() *ConfigHandler {
	return &ConfigHandler{up: wgquick.Up}
}

func (h *ConfigHandler) Create(coordinatorPubKey, coordinatorPubIP, clientPrivKey, clientVPNIP string, mtu int) (*wgquick.Config, error) {
	return NewWGQuickConfig(coordinatorPubKey, coordinatorPubIP, clientPrivKey, clientVPNIP, mtu)
}

// Apply applies the generated WireGuard quick config.
func (h *ConfigHandler) Apply(conf *wgquick.Config) error {
	return h.up(conf, interfaceName)
}

// Marshal returns the bytes of the config.
func (h *ConfigHandler) Marshal(conf *wgquick.Config) ([]byte, error) {
	data, err := conf.MarshalText()
	if err != nil {
		return nil, fmt.Errorf("marshal wg-quick config: %w", err)
	}
	return data, nil
}

// newConfig creates a new WireGuard configuration.
func newConfig(coordinatorPubKey, coordinatorPubIP, clientPrivKey string) (wgtypes.Config, error) {
	_, allowedIPs, err := net.ParseCIDR("10.118.0.1/32")
	if err != nil {
		return wgtypes.Config{}, fmt.Errorf("parsing CIDR: %w", err)
	}

	coordinatorPubKeyParsed, err := wgtypes.ParseKey(coordinatorPubKey)
	if err != nil {
		return wgtypes.Config{}, fmt.Errorf("parsing coordinator public key: %w", err)
	}

	var endpoint *net.UDPAddr
	if ip := net.ParseIP(coordinatorPubIP); ip != nil {
		endpoint = &net.UDPAddr{IP: ip, Port: wireguardPort}
	} else {
		endpoint = nil
	}
	clientPrivKeyParsed, err := wgtypes.ParseKey(clientPrivKey)
	if err != nil {
		return wgtypes.Config{}, fmt.Errorf("parsing client private key: %w", err)
	}
	listenPort := wireguardPort

	keepAlive := 10 * time.Second
	return wgtypes.Config{
		PrivateKey:   &clientPrivKeyParsed,
		ListenPort:   &listenPort,
		ReplacePeers: false,
		Peers: []wgtypes.PeerConfig{
			{
				PublicKey:                   coordinatorPubKeyParsed,
				UpdateOnly:                  false,
				Endpoint:                    endpoint,
				AllowedIPs:                  []net.IPNet{*allowedIPs},
				PersistentKeepaliveInterval: &keepAlive,
			},
		},
	}, nil
}

// NewWGQuickConfig creates a new WireGuard wg-quick configuration.
func NewWGQuickConfig(coordinatorPubKey, coordinatorPubIP, clientPrivKey, clientVPNIP string, mtu int) (*wgquick.Config, error) {
	config, err := newConfig(coordinatorPubKey, coordinatorPubIP, clientPrivKey)
	if err != nil {
		return nil, err
	}

	clientIP := net.ParseIP(clientVPNIP)
	if clientIP == nil {
		return nil, fmt.Errorf("invalid client vpn ip '%s'", clientVPNIP)
	}
	quickfile := wgquick.Config{
		Config:  config,
		Address: []net.IPNet{{IP: clientIP, Mask: []byte{255, 255, 0, 0}}},
		MTU:     mtu,
	}
	return &quickfile, nil
}
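
// For illustration (a sketch, not output captured from this code): a config
// built by NewWGQuickConfig("<coordKey>", "203.0.113.10", "<privKey>",
// "10.118.0.2", 1450) should marshal to roughly the following wg-quick file;
// values in angle brackets are placeholders.
//
//	[Interface]
//	Address = 10.118.0.2/16
//	PrivateKey = <privKey>
//	ListenPort = 51820
//	MTU = 1450
//
//	[Peer]
//	PublicKey = <coordKey>
//	AllowedIPs = 10.118.0.1/32
//	Endpoint = 203.0.113.10:51820
//	PersistentKeepalive = 10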
@ -1,165 +0,0 @@
package vpn

import (
	"errors"
	"testing"

	wgquick "github.com/nmiculinic/wg-quick-go"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"go.uber.org/goleak"
	"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
)

func TestMain(m *testing.M) {
	goleak.VerifyTestMain(m)
}
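
// goleak.VerifyTestMain fails the test binary if goroutines other than the
// test runner's own are still alive at exit, catching goroutines leaked by
// the tests in this package.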

func TestCreate(t *testing.T) {
	require := require.New(t)

	testKey, err := wgtypes.GeneratePrivateKey()
	require.NoError(err)

	testCases := map[string]struct {
		coordinatorPubKey string
		coordinatorPubIP  string
		clientPrivKey     string
		clientVPNIP       string
		wantErr           bool
	}{
		"valid config": {
			clientPrivKey:     testKey.String(),
			clientVPNIP:       "192.0.2.1",
			coordinatorPubKey: testKey.PublicKey().String(),
			coordinatorPubIP:  "192.0.2.1",
		},
		"valid missing endpoint": {
			clientPrivKey:     testKey.String(),
			clientVPNIP:       "192.0.2.1",
			coordinatorPubKey: testKey.PublicKey().String(),
		},
		"invalid coordinator pub key": {
			clientPrivKey:    testKey.String(),
			clientVPNIP:      "192.0.2.1",
			coordinatorPubIP: "192.0.2.1",
			wantErr:          true,
		},
		"invalid client priv key": {
			clientVPNIP:       "192.0.2.1",
			coordinatorPubKey: testKey.PublicKey().String(),
			coordinatorPubIP:  "192.0.2.1",
			wantErr:           true,
		},
		"invalid client ip": {
			clientPrivKey:     testKey.String(),
			coordinatorPubKey: testKey.PublicKey().String(),
			coordinatorPubIP:  "192.0.2.1",
			wantErr:           true,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)

			handler := &ConfigHandler{}
			const mtu = 2

			quickConfig, err := handler.Create(tc.coordinatorPubKey, tc.coordinatorPubIP, tc.clientPrivKey, tc.clientVPNIP, mtu)

			if tc.wantErr {
				assert.Error(err)
			} else {
				assert.NoError(err)
				assert.Equal(tc.clientPrivKey, quickConfig.PrivateKey.String())
				assert.Equal(tc.clientVPNIP, quickConfig.Address[0].IP.String())

				if tc.coordinatorPubIP != "" {
					assert.Equal(tc.coordinatorPubIP, quickConfig.Peers[0].Endpoint.IP.String())
				}
				assert.Equal(mtu, quickConfig.MTU)
			}
		})
	}
}

func TestApply(t *testing.T) {
	testKey, err := wgtypes.GeneratePrivateKey()
	require.NoError(t, err)

	testCases := map[string]struct {
		quickConfig *wgquick.Config
		upErr       error
		wantErr     bool
	}{
		"valid": {
			quickConfig: &wgquick.Config{Config: wgtypes.Config{PrivateKey: &testKey}},
		},
		"invalid apply": {
			quickConfig: &wgquick.Config{Config: wgtypes.Config{PrivateKey: &testKey}},
			upErr:       errors.New("some err"),
			wantErr:     true,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)

			var ifaceSpy string
			var cfgSpy *wgquick.Config
			upSpy := func(cfg *wgquick.Config, iface string) error {
				ifaceSpy = iface
				cfgSpy = cfg
				return tc.upErr
			}

			handler := &ConfigHandler{up: upSpy}

			err := handler.Apply(tc.quickConfig)

			if tc.wantErr {
				assert.Error(err)
			} else {
				assert.NoError(err)
				assert.Equal(interfaceName, ifaceSpy)
				assert.Equal(tc.quickConfig, cfgSpy)
			}
		})
	}
}

func TestMarshal(t *testing.T) {
	require := require.New(t)

	testKey, err := wgtypes.GeneratePrivateKey()
	require.NoError(err)

	testCases := map[string]struct {
		quickConfig *wgquick.Config
		wantErr     bool
	}{
		"valid": {
			quickConfig: &wgquick.Config{Config: wgtypes.Config{PrivateKey: &testKey}},
		},
		"invalid config": {
			quickConfig: &wgquick.Config{Config: wgtypes.Config{}},
			wantErr:     true,
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)
			handler := &ConfigHandler{}

			data, err := handler.Marshal(tc.quickConfig)
			if tc.wantErr {
				assert.Error(err)
			} else {
				assert.NoError(err)
				assert.Greater(len(data), 0)
			}
		})
	}
}
@ -6,7 +6,6 @@ import (
	"flag"
	"io"
	"log"
	"net"
	"os"
	"strings"

@ -15,13 +14,10 @@ import (
	qemucloud "github.com/edgelesssys/constellation/coordinator/cloudprovider/qemu"
	"github.com/edgelesssys/constellation/coordinator/config"
	"github.com/edgelesssys/constellation/coordinator/core"
	"github.com/edgelesssys/constellation/coordinator/diskencryption"
	"github.com/edgelesssys/constellation/coordinator/kubernetes"
	"github.com/edgelesssys/constellation/coordinator/kubernetes/k8sapi"
	"github.com/edgelesssys/constellation/coordinator/kubernetes/k8sapi/kubectl"
	"github.com/edgelesssys/constellation/coordinator/logging"
	"github.com/edgelesssys/constellation/coordinator/util"
	"github.com/edgelesssys/constellation/coordinator/wireguard"
	"github.com/edgelesssys/constellation/internal/atls"
	"github.com/edgelesssys/constellation/internal/attestation/azure"
	"github.com/edgelesssys/constellation/internal/attestation/gcp"
@ -29,7 +25,6 @@ import (
	"github.com/edgelesssys/constellation/internal/attestation/simulator"
	"github.com/edgelesssys/constellation/internal/attestation/vtpm"
	"github.com/edgelesssys/constellation/internal/file"
	"github.com/edgelesssys/constellation/internal/grpc/dialer"
	"github.com/edgelesssys/constellation/internal/oid"
	grpc_zap "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap"
	"github.com/spf13/afero"
@ -37,17 +32,14 @@ import (
)

const (
	defaultIP           = "0.0.0.0"
	defaultPort         = "9000"
	defaultEtcdEndpoint = "127.0.0.1:2379"
	defaultIP   = "0.0.0.0"
	defaultPort = "9000"
)

func main() {
	var bindIP, bindPort, etcdEndpoint string
	var enforceEtcdTls bool
	var kube core.Cluster
	var bindIP, bindPort string
	var clusterInitJoiner ClusterInitJoiner
	var coreMetadata core.ProviderMetadata
	var encryptedDisk core.EncryptedDisk
	var cloudLogger logging.CloudLogger
	cfg := zap.NewDevelopmentConfig()

@ -66,14 +58,7 @@ func main() {
	}
	zapLoggerCore := zapLogger.Named("core")

	wg, err := wireguard.New()
	if err != nil {
		zapLogger.Panic("error opening wgctrl client")
	}
	defer wg.Close()

	var issuer core.QuoteIssuer
	var validator core.QuoteValidator
	var issuer atls.Issuer
	var openTPM vtpm.TPMOpenFunc
	var fs afero.Fs

@ -88,7 +73,6 @@ func main() {
		}

		issuer = gcp.NewIssuer()
		validator = gcp.NewValidator(pcrs)

		gcpClient, err := gcpcloud.NewClient(context.Background())
		if err != nil {
@ -108,15 +92,12 @@ func main() {
		if err != nil {
			log.Fatal(err)
		}
		kube = kubernetes.New(
		clusterInitJoiner = kubernetes.New(
			"gcp", k8sapi.NewKubernetesUtil(), &k8sapi.CoreOSConfiguration{}, kubectl.New(), &gcpcloud.CloudControllerManager{},
			&gcpcloud.CloudNodeManager{}, &gcpcloud.Autoscaler{}, metadata, pcrsJSON,
		)
		encryptedDisk = diskencryption.New()
		bindIP = defaultIP
		bindPort = defaultPort
		etcdEndpoint = defaultEtcdEndpoint
		enforceEtcdTls = true
		openTPM = vtpm.OpenVTPM
		fs = afero.NewOsFs()
	case "azure":
@ -126,7 +107,6 @@ func main() {
		}

		issuer = azure.NewIssuer()
		validator = azure.NewValidator(pcrs)

		metadata, err := azurecloud.NewMetadata(context.Background())
		if err != nil {
@ -141,16 +121,13 @@ func main() {
		if err != nil {
			log.Fatal(err)
		}
		kube = kubernetes.New(
		clusterInitJoiner = kubernetes.New(
			"azure", k8sapi.NewKubernetesUtil(), &k8sapi.CoreOSConfiguration{}, kubectl.New(), azurecloud.NewCloudControllerManager(metadata),
			&azurecloud.CloudNodeManager{}, &azurecloud.Autoscaler{}, metadata, pcrsJSON,
		)

		encryptedDisk = diskencryption.New()
		bindIP = defaultIP
		bindPort = defaultPort
		etcdEndpoint = defaultEtcdEndpoint
		enforceEtcdTls = true
		openTPM = vtpm.OpenVTPM
		fs = afero.NewOsFs()
	case "qemu":
@ -160,7 +137,6 @@ func main() {
		}

		issuer = qemu.NewIssuer()
		validator = qemu.NewValidator(pcrs)

		cloudLogger = qemucloud.NewLogger()
		metadata := &qemucloud.Metadata{}
@ -168,30 +144,23 @@ func main() {
		if err != nil {
			log.Fatal(err)
		}
		kube = kubernetes.New(
		clusterInitJoiner = kubernetes.New(
			"qemu", k8sapi.NewKubernetesUtil(), &k8sapi.CoreOSConfiguration{}, kubectl.New(), &qemucloud.CloudControllerManager{},
			&qemucloud.CloudNodeManager{}, &qemucloud.Autoscaler{}, metadata, pcrsJSON,
		)
		coreMetadata = metadata

		encryptedDisk = diskencryption.New()
		bindIP = defaultIP
		bindPort = defaultPort
		etcdEndpoint = defaultEtcdEndpoint
		enforceEtcdTls = true
		openTPM = vtpm.OpenVTPM
		fs = afero.NewOsFs()
	default:
		issuer = atls.NewFakeIssuer(oid.Dummy{})
		validator = atls.NewFakeValidator(oid.Dummy{})
		kube = &core.ClusterFake{}
		clusterInitJoiner = &core.ClusterFake{}
		coreMetadata = &core.ProviderMetadataFake{}
		cloudLogger = &logging.NopLogger{}
		encryptedDisk = &core.EncryptedDiskFake{}
		bindIP = defaultIP
		bindPort = defaultPort
		etcdEndpoint = "etcd-storage:2379"
		enforceEtcdTls = false
		var simulatedTPMCloser io.Closer
		openTPM, simulatedTPMCloser = simulator.NewSimulatedTPMOpenFunc()
		defer simulatedTPMCloser.Close()
@ -199,9 +168,8 @@ func main() {
	}

	fileHandler := file.NewHandler(fs)
	netDialer := &net.Dialer{}
	dialer := dialer.New(nil, validator, netDialer)
	run(issuer, wg, openTPM, util.GetIPAddr, dialer, fileHandler, kube,
		coreMetadata, encryptedDisk, etcdEndpoint, enforceEtcdTls, bindIP,

	run(issuer, openTPM, fileHandler, clusterInitJoiner,
		coreMetadata, bindIP,
		bindPort, zapLoggerCore, cloudLogger, fs)
}
@ -1,36 +1,25 @@
package main

import (
	"context"
	"errors"
	"fmt"
	"net"
	"sync"

	"github.com/edgelesssys/constellation/coordinator/core"
	"github.com/edgelesssys/constellation/coordinator/internal/initserver"
	"github.com/edgelesssys/constellation/coordinator/internal/joinclient"
	"github.com/edgelesssys/constellation/coordinator/logging"
	"github.com/edgelesssys/constellation/coordinator/pubapi"
	"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
	"github.com/edgelesssys/constellation/coordinator/store"
	"github.com/edgelesssys/constellation/coordinator/vpnapi"
	"github.com/edgelesssys/constellation/coordinator/vpnapi/vpnproto"
	"github.com/edgelesssys/constellation/internal/attestation/vtpm"
	"github.com/edgelesssys/constellation/internal/deploy/user"
	"github.com/edgelesssys/constellation/internal/file"
	"github.com/edgelesssys/constellation/internal/grpc/atlscredentials"
	"github.com/edgelesssys/constellation/internal/grpc/dialer"
	grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
	grpc_zap "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap"
	grpc_ctxtags "github.com/grpc-ecosystem/go-grpc-middleware/tags"
	"github.com/spf13/afero"
	"go.uber.org/zap"
	"google.golang.org/grpc"
)

var version = "0.0.0"

func run(issuer core.QuoteIssuer, vpn core.VPN, tpm vtpm.TPMOpenFunc, getPublicIPAddr func() (string, error), dialer *dialer.Dialer, fileHandler file.Handler,
	kube core.Cluster, metadata core.ProviderMetadata, disk core.EncryptedDisk, etcdEndpoint string, etcdTLS bool, bindIP, bindPort string, logger *zap.Logger,
func run(issuer core.QuoteIssuer, tpm vtpm.TPMOpenFunc, fileHandler file.Handler,
	kube ClusterInitJoiner, metadata core.ProviderMetadata,
	bindIP, bindPort string, logger *zap.Logger,
	cloudLogger logging.CloudLogger, fs afero.Fs,
) {
	defer logger.Sync()
@ -39,135 +28,34 @@ func run(issuer core.QuoteIssuer, vpn core.VPN, tpm vtpm.TPMOpenFunc, getPublicI
	defer cloudLogger.Close()
	cloudLogger.Disclose("Coordinator started running...")

	creds := atlscredentials.New(issuer, nil)

	etcdStoreFactory := store.NewEtcdStoreFactory(etcdEndpoint, etcdTLS, logger)
	linuxUserManager := user.NewLinuxUserManager(fs)
	core, err := core.NewCore(vpn, kube, metadata, disk, logger, tpm, etcdStoreFactory, fileHandler, linuxUserManager)
	nodeActivated, err := vtpm.IsNodeInitialized(tpm)
	if err != nil {
		logger.Fatal("failed to create core", zap.Error(err))
		logger.Fatal("failed to check for previous activation using vTPM", zap.Error(err))
	}

	vapiServer := &vpnAPIServer{logger: logger.Named("vpnapi"), core: core}
	loggerPubAPI := logger.Named("pubapi")
	papi := pubapi.New(loggerPubAPI, cloudLogger, core, dialer, vapiServer, getPublicIPAddr, pubapi.GetRecoveryPeerFromContext)
	// initialize state machine and wait for re-joining of the VPN (if applicable)
	nodeActivated, err := core.Initialize(context.TODO(), dialer, papi)
	if err != nil {
		logger.Fatal("failed to initialize core", zap.Error(err))
	}

	zapLoggergRPC := loggerPubAPI.Named("gRPC")

	grpcServer := grpc.NewServer(
		grpc.Creds(creds),
		grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(
			grpc_ctxtags.StreamServerInterceptor(),
			grpc_zap.StreamServerInterceptor(zapLoggergRPC),
		)),
		grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
			grpc_ctxtags.UnaryServerInterceptor(),
			grpc_zap.UnaryServerInterceptor(zapLoggergRPC),
		)),
	)
	pubproto.RegisterAPIServer(grpcServer, papi)

	lis, err := net.Listen("tcp", net.JoinHostPort(bindIP, bindPort))
	if err != nil {
		zapLoggergRPC.Fatal("failed to create listener", zap.Error(err))
	}
	zapLoggergRPC.Info("server listener created", zap.String("address", lis.Addr().String()))

	var wg sync.WaitGroup
	defer wg.Wait()
	wg.Add(1)
	go func() {
		defer wg.Done()
		if err := grpcServer.Serve(lis); err != nil {
			zapLoggergRPC.Fatal("failed to serve gRPC", zap.Error(err))
	if nodeActivated {
		if err := kube.StartKubelet(); err != nil {
			logger.Fatal("failed to restart kubelet", zap.Error(err))
		}
	}()
		return
	}

	if !nodeActivated {
		zapLoggerStartupJoin := logger.Named("startup-join")
		if err := tryJoinClusterOnStartup(getPublicIPAddr, metadata, zapLoggerStartupJoin); err != nil {
			zapLoggerStartupJoin.Info("joining existing cluster on startup failed. Waiting for connection.", zap.Error(err))
		}
	nodeLock := &sync.Mutex{}
	initServer := initserver.New(nodeLock, kube, logger)

	dialer := dialer.New(issuer, nil, &net.Dialer{})
	joinClient := joinclient.New(nodeLock, dialer, kube, metadata, logger)

	joinClient.Start()
	defer joinClient.Stop()

	if err := initServer.Serve(bindIP, bindPort); err != nil {
		logger.Error("Failed to serve init server", zap.Error(err))
	}
}

func tryJoinClusterOnStartup(getPublicIPAddr func() (string, error), metadata core.ProviderMetadata, logger *zap.Logger) error {
	nodePublicIP, err := getPublicIPAddr()
	if err != nil {
		return fmt.Errorf("retrieving own public ip: %w", err)
	}
	if !metadata.Supported() {
		logger.Info("Metadata API not implemented for cloud provider")
		return errors.New("metadata API not implemented")
	}
	coordinatorEndpoints, err := core.CoordinatorEndpoints(context.TODO(), metadata)
	if err != nil {
		return fmt.Errorf("retrieving coordinatorEndpoints from cloud provider api: %w", err)
	}
	logger.Info("Retrieved endpoints from cloud-provider API", zap.Strings("endpoints", coordinatorEndpoints))

	// We create an unverified client connection, since the node does not need to verify the Coordinator.
	// ActivateAdditionalNodes triggers the Coordinator to call ActivateAsNode. This RPC lets the Coordinator verify the node.
	creds := atlscredentials.New(nil, nil)

	// try to notify a coordinator to activate this node
	for _, coordinatorEndpoint := range coordinatorEndpoints {
		conn, err := grpc.Dial(coordinatorEndpoint, grpc.WithTransportCredentials(creds))
		if err != nil {
			logger.Info("Dial failed:", zap.String("endpoint", coordinatorEndpoint), zap.Error(err))
			continue
		}
		defer conn.Close()
		client := pubproto.NewAPIClient(conn)
		logger.Info("Activating as node on startup")
		_, err = client.ActivateAdditionalNodes(context.Background(), &pubproto.ActivateAdditionalNodesRequest{NodePublicIps: []string{nodePublicIP}})
		return err
	}

	return errors.New("could not connect to any coordinator endpoint")
}

type vpnAPIServer struct {
	logger   *zap.Logger
	core     vpnapi.Core
	listener net.Listener
	server   *grpc.Server
}

func (v *vpnAPIServer) Listen(endpoint string) error {
	api := vpnapi.New(v.logger, v.core)
	grpcLogger := v.logger.Named("gRPC")
	v.server = grpc.NewServer(
		grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(
			grpc_ctxtags.StreamServerInterceptor(),
			grpc_zap.StreamServerInterceptor(grpcLogger),
		)),
		grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
			grpc_ctxtags.UnaryServerInterceptor(),
			grpc_zap.UnaryServerInterceptor(grpcLogger),
		)),
	)
	vpnproto.RegisterAPIServer(v.server, api)

	lis, err := net.Listen("tcp", endpoint)
	if err != nil {
		return err
	}
	v.listener = lis
	return nil
}

func (v *vpnAPIServer) Serve() error {
	return v.server.Serve(v.listener)
}

func (v *vpnAPIServer) Close() {
	if v.server != nil {
		v.server.GracefulStop()
	}
type ClusterInitJoiner interface {
	joinclient.ClusterJoiner
	initserver.ClusterInitializer
	StartKubelet() error
}
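
// The ClusterInitJoiner interface above bundles what main and run need from
// the Kubernetes layer: initializing a new cluster, joining an existing one,
// and restarting the kubelet when an already-activated node reboots.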
@ -1,404 +0,0 @@
package coordinator

import (
	"context"
	"errors"
	"io"
	"net"
	"sync"
	"testing"

	"github.com/edgelesssys/constellation/coordinator/core"
	"github.com/edgelesssys/constellation/coordinator/logging"
	"github.com/edgelesssys/constellation/coordinator/peer"
	"github.com/edgelesssys/constellation/coordinator/pubapi"
	"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
	"github.com/edgelesssys/constellation/coordinator/state"
	"github.com/edgelesssys/constellation/coordinator/store"
	"github.com/edgelesssys/constellation/coordinator/vpnapi"
	"github.com/edgelesssys/constellation/coordinator/vpnapi/vpnproto"
	"github.com/edgelesssys/constellation/internal/atls"
	"github.com/edgelesssys/constellation/internal/attestation/simulator"
	"github.com/edgelesssys/constellation/internal/deploy/user"
	"github.com/edgelesssys/constellation/internal/file"
	"github.com/edgelesssys/constellation/internal/grpc/atlscredentials"
	"github.com/edgelesssys/constellation/internal/grpc/dialer"
	"github.com/edgelesssys/constellation/internal/grpc/testdialer"
	"github.com/edgelesssys/constellation/internal/oid"
	kms "github.com/edgelesssys/constellation/kms/setup"
	"github.com/spf13/afero"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"go.uber.org/goleak"
	"go.uber.org/zap"
	"go.uber.org/zap/zaptest"
	"google.golang.org/grpc"
)

func TestMain(m *testing.M) {
	goleak.VerifyTestMain(m,
		// https://github.com/census-instrumentation/opencensus-go/issues/1262
		goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"),
	)
}

// TestCoordinator tests the integration of packages core, pubapi, and vpnapi. It activates
// a coordinator and some nodes and (virtually) sends a packet over the fake VPN.
func TestCoordinator(t *testing.T) {
	assert := assert.New(t)
	require := require.New(t)

	nodeIPs := []string{"192.0.2.11", "192.0.2.12", "192.0.2.13"}
	coordinatorIP := "192.0.2.1"
	bindPort := "9000"
	logger := zaptest.NewLogger(t)
	dialer := testdialer.NewBufconnDialer()
	netw := newNetwork()

	// spawn 4 peers: 1 designated coordinator and 3 nodes
	coordServer, coordPAPI, _ := spawnPeer(require, logger.Named("coord"), dialer, netw, net.JoinHostPort(coordinatorIP, bindPort))
	defer coordPAPI.Close()
	defer coordServer.GracefulStop()
	nodeServer1, nodePAPI1, nodeVPN1 := spawnPeer(require, logger.Named("node1"), dialer, netw, net.JoinHostPort(nodeIPs[0], bindPort))
	defer nodePAPI1.Close()
	defer nodeServer1.GracefulStop()
	nodeServer2, nodePAPI2, nodeVPN2 := spawnPeer(require, logger.Named("node2"), dialer, netw, net.JoinHostPort(nodeIPs[1], bindPort))
	defer nodePAPI2.Close()
	defer nodeServer2.GracefulStop()
	nodeServer3, nodePAPI3, nodeVPN3 := spawnPeer(require, logger.Named("node3"), dialer, netw, net.JoinHostPort(nodeIPs[2], bindPort))
	defer nodePAPI3.Close()
	defer nodeServer3.GracefulStop()

	require.NoError(activateCoordinator(require, dialer, coordinatorIP, bindPort, nodeIPs))

	// send something from node 1 to node 2

	nodeIP1, err := nodeVPN1.GetInterfaceIP()
	require.NoError(err)
	nodeIP2, err := nodeVPN2.GetInterfaceIP()
	require.NoError(err)
	assert.NotEqual(nodeIP1, nodeIP2)

	nodeVPN1.send(nodeIP2, "foo")
	assert.Nil(nodeVPN3.recv())
	pa := nodeVPN2.recv()
	require.NotNil(pa)
	assert.Equal(nodeIP1, pa.src)
	assert.Equal("foo", pa.data)
}

// TestConcurrent is supposed to detect data races when run with -race.
func TestConcurrent(t *testing.T) {
	assert := assert.New(t)
	require := require.New(t)

	nodeIPs := []string{"192.0.2.11", "192.0.2.12"}
	coordinatorIP := "192.0.2.1"
	bindPort := "9000"
	logger := zaptest.NewLogger(t)
	dialer := testdialer.NewBufconnDialer()
	netw := newNetwork()

	// spawn peers
	coordServer, coordPAPI, _ := spawnPeer(require, logger.Named("coord"), dialer, netw, net.JoinHostPort(coordinatorIP, bindPort))
	defer coordPAPI.Close()
	defer coordServer.GracefulStop()
	nodeServer1, nodePAPI1, _ := spawnPeer(require, logger.Named("node1"), dialer, netw, net.JoinHostPort(nodeIPs[0], bindPort))
	defer nodePAPI1.Close()
	defer nodeServer1.GracefulStop()
	nodeServer2, nodePAPI2, _ := spawnPeer(require, logger.Named("node2"), dialer, netw, net.JoinHostPort(nodeIPs[1], bindPort))
	defer nodePAPI2.Close()
	defer nodeServer2.GracefulStop()

	var wg sync.WaitGroup

	// This test is a rather rough check for concurrency errors in the pubapi. To this end, various funcs of the pubapi
	// are called concurrently. As a minimal verification, returned errors are checked.
	// The coverage of this test alone isn't sufficient. Not all funcs of the pubapi are tested, and arguments are constant.
	// In the future, we should have something more sophisticated.

	actCoord := func(retErr chan error) {
		defer wg.Done()
		retErr <- activateCoordinator(require, dialer, coordinatorIP, bindPort, nodeIPs)
	}

	actNode := func(papi *pubapi.API) {
		defer wg.Done()
		// actNode is called on already activated nodes, so this will fail due to wrong state.
		assert.Error(papi.ActivateAsNode(nil))
	}

	updNode := func(papi *pubapi.API, noerr bool) {
		defer wg.Done()
		_, err := papi.TriggerNodeUpdate(context.Background(), &pubproto.TriggerNodeUpdateRequest{})
		if noerr {
			assert.NoError(err)
		}
	}

	getState := func(papi *pubapi.API) {
		defer wg.Done()
		// GetState should always succeed, regardless of what happened to the peer before.
		_, err := papi.GetState(context.Background(), &pubproto.GetStateRequest{})
		assert.NoError(err)
	}

	join := func(papi *pubapi.API) {
		defer wg.Done()
		// For now, we always pass an empty JoinClusterRequest, so JoinCluster
		// is expected to fail even if the peer is in the required state.
		_, err := papi.JoinCluster(context.Background(), &pubproto.JoinClusterRequest{})
		assert.Error(err)
	}

	// activate coordinator and make some other calls concurrently
	wg.Add(16)
	actCoordErrs := make(chan error, 2)
	go actCoord(actCoordErrs)
	go actCoord(actCoordErrs)
	// updNode on unactivated node should fail.
	// updNode on Coordinator should fail.
	// updNode on Node should succeed, but we don't know whether the node is already activated or not, so we can't expect no error.
	go updNode(coordPAPI, false)
	go updNode(coordPAPI, false)
	go updNode(nodePAPI1, false)
	go updNode(nodePAPI1, false)
	go updNode(nodePAPI2, false)
	go updNode(nodePAPI2, false)
	go getState(coordPAPI)
	go getState(coordPAPI)
	go getState(nodePAPI1)
	go getState(nodePAPI1)
	go getState(nodePAPI2)
	go getState(nodePAPI2)
	go join(coordPAPI)
	go join(coordPAPI)
	wg.Wait()
	actCoord1HasErr := <-actCoordErrs != nil
	actCoord2HasErr := <-actCoordErrs != nil
	require.NotEqual(actCoord1HasErr, actCoord2HasErr, "exactly one actCoord call should succeed")

	// make some concurrent calls on the activated peers
	wg.Add(26)
	go actCoord(actCoordErrs)
	go actCoord(actCoordErrs)
	go actNode(coordPAPI)
	go actNode(coordPAPI)
	go actNode(nodePAPI1)
	go actNode(nodePAPI1)
	go actNode(nodePAPI2)
	go actNode(nodePAPI2)
	go updNode(coordPAPI, false)
	go updNode(coordPAPI, false)
	go updNode(nodePAPI1, true)
	go updNode(nodePAPI1, true)
	go updNode(nodePAPI2, true)
	go updNode(nodePAPI2, true)
	go getState(coordPAPI)
	go getState(coordPAPI)
	go getState(nodePAPI1)
	go getState(nodePAPI1)
	go getState(nodePAPI2)
	go getState(nodePAPI2)
	go join(coordPAPI)
	go join(coordPAPI)
	go join(nodePAPI1)
	go join(nodePAPI1)
	go join(nodePAPI2)
	go join(nodePAPI2)
	wg.Wait()
	// One Coordinator is already activated, so both of the following activation calls will fail.
	assert.Error(<-actCoordErrs)
	assert.Error(<-actCoordErrs)
}
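
// Run with the race detector (go test -race) for TestConcurrent to be
// meaningful; without it, the test only validates the returned errors and
// gives little signal about actual data races.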

func spawnPeer(require *require.Assertions, logger *zap.Logger, netDialer *testdialer.BufconnDialer, netw *network, endpoint string) (*grpc.Server, *pubapi.API, *fakeVPN) {
	vpn := newVPN(netw, endpoint)
	fs := afero.NewMemMapFs()
	cor, err := core.NewCore(vpn, &core.ClusterFake{}, &core.ProviderMetadataFake{}, &core.EncryptedDiskFake{}, logger, simulator.OpenSimulatedTPM, fakeStoreFactory{}, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
	require.NoError(err)
	require.NoError(cor.AdvanceState(state.AcceptingInit, nil, nil))

	getPublicAddr := func() (string, error) {
		return "192.0.2.1", nil
	}
	dialer := dialer.New(nil, atls.NewFakeValidator(oid.Dummy{}), netDialer)
	vapiServer := &fakeVPNAPIServer{logger: logger.Named("vpnapi"), core: cor, dialer: netDialer}

	papi := pubapi.New(logger, &logging.NopLogger{}, cor, dialer, vapiServer, getPublicAddr, nil)

	creds := atlscredentials.New(atls.NewFakeIssuer(oid.Dummy{}), nil)
	server := grpc.NewServer(grpc.Creds(creds))
	pubproto.RegisterAPIServer(server, papi)

	listener := netDialer.GetListener(endpoint)
	go server.Serve(listener)

	return server, papi, vpn
}

func activateCoordinator(require *require.Assertions, dialer netDialer, coordinatorIP, bindPort string, nodeIPs []string) error {
	ctx := context.Background()
	conn, err := dialGRPC(ctx, dialer, net.JoinHostPort(coordinatorIP, bindPort))
	require.NoError(err)
	defer conn.Close()

	client := pubproto.NewAPIClient(conn)
	stream, err := client.ActivateAsCoordinator(ctx, &pubproto.ActivateAsCoordinatorRequest{
		NodePublicIps: nodeIPs,
		MasterSecret:  []byte("Constellation"),
		KmsUri:        kms.ClusterKMSURI,
		StorageUri:    kms.NoStoreURI,
	})
	require.NoError(err)

	for {
		_, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			return nil
		}
		if err != nil {
			return err
		}
	}
}

func dialGRPC(ctx context.Context, dialer netDialer, target string) (*grpc.ClientConn, error) {
	creds := atlscredentials.New(nil, atls.NewFakeValidators(oid.Dummy{}))

	return grpc.DialContext(ctx, target,
		grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
			return dialer.DialContext(ctx, "tcp", addr)
		}),
		grpc.WithTransportCredentials(creds),
	)
}

type fakeStoreFactory struct{}

func (fakeStoreFactory) New() (store.Store, error) {
	return store.NewStdStore(), nil
}

type fakeVPNAPIServer struct {
	logger   *zap.Logger
	core     vpnapi.Core
	dialer   *testdialer.BufconnDialer
	listener net.Listener
	server   *grpc.Server
}

func (v *fakeVPNAPIServer) Listen(endpoint string) error {
	api := vpnapi.New(v.logger, v.core)
	v.server = grpc.NewServer()
	vpnproto.RegisterAPIServer(v.server, api)
	v.listener = v.dialer.GetListener(endpoint)
	return nil
}

func (v *fakeVPNAPIServer) Serve() error {
	return v.server.Serve(v.listener)
}

func (v *fakeVPNAPIServer) Close() {
	if v.server != nil {
		v.server.GracefulStop()
	}
}

type network struct {
	packets map[string][]packet
}

func newNetwork() *network {
	return &network{packets: make(map[string][]packet)}
}

type packet struct {
	src  string
	data string
}

type fakeVPN struct {
	peers       map[string]string // vpnIP -> publicIP
	netw        *network
	publicIP    string
	interfaceIP string
}

func newVPN(netw *network, publicEndpoint string) *fakeVPN {
	publicIP, _, err := net.SplitHostPort(publicEndpoint)
	if err != nil {
		panic(err)
	}
	return &fakeVPN{
		peers:    make(map[string]string),
		netw:     netw,
		publicIP: publicIP,
	}
}

func (*fakeVPN) Setup(privKey []byte) error {
	return nil
}

func (*fakeVPN) GetPrivateKey() ([]byte, error) {
	return nil, nil
}

func (*fakeVPN) GetPublicKey() ([]byte, error) {
	return nil, nil
}

func (v *fakeVPN) GetInterfaceIP() (string, error) {
	return v.interfaceIP, nil
}

func (v *fakeVPN) SetInterfaceIP(ip string) error {
	v.interfaceIP = ip
	return nil
}

func (v *fakeVPN) AddPeer(pubKey []byte, publicIP string, vpnIP string) error {
	v.peers[vpnIP] = publicIP
	return nil
}

func (v *fakeVPN) RemovePeer(pubKey []byte) error {
	panic("dummy")
}

func (v *fakeVPN) UpdatePeers(peers []peer.Peer) error {
	for _, peer := range peers {
		if err := v.AddPeer(peer.VPNPubKey, peer.PublicIP, peer.VPNIP); err != nil {
			return err
		}
	}
	return nil
}

func (v *fakeVPN) send(dst string, data string) {
	pubdst := v.peers[dst]
	packets := v.netw.packets
	packets[pubdst] = append(packets[pubdst], packet{src: v.publicIP, data: data})
}

func (v *fakeVPN) recv() *packet {
	packets := v.netw.packets
	queue := packets[v.publicIP]
	if len(queue) == 0 {
		return nil
	}
	packet := queue[0]
	packets[v.publicIP] = queue[1:]
	for vpnIP, pubIP := range v.peers {
		if pubIP == packet.src {
			packet.src = vpnIP
		}
	}
	return &packet
}
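
// recv translates the sender's public IP back to the VPN IP found in the
// receiver's peer table, mimicking how a real VPN presents peers by their
// in-VPN addresses rather than their public ones.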

type netDialer interface {
	DialContext(ctx context.Context, network, address string) (net.Conn, error)
}
@ -1,67 +0,0 @@
package core

import (
	"context"
	"errors"
	"fmt"

	"github.com/edgelesssys/constellation/coordinator/role"
	"github.com/edgelesssys/constellation/coordinator/state"
	kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
)

// SetNodeActive activates the peer as a node and joins the cluster.
func (c *Core) SetNodeActive(diskKey, ownerID, clusterID []byte, kubeAPIendpoint, token, discoveryCACertHash string) (reterr error) {
	c.mut.Lock()
	defer c.mut.Unlock()

	if err := c.RequireState(state.AcceptingInit); err != nil {
		return fmt.Errorf("node is not in required state for activation: %w", err)
	}

	if len(ownerID) == 0 || len(clusterID) == 0 {
		c.zaplogger.Error("Missing data to taint worker node as initialized")
		return errors.New("missing data to taint worker node as initialized")
	}

	// If any of the following actions fail, we cannot revert.
	// Thus, mark this peer as failed.
	defer func() {
		if reterr != nil {
			_ = c.AdvanceState(state.Failed, nil, nil)
		}
	}()

	// AdvanceState MUST be called before any other functions that are not sanity checks or otherwise required.
	// This ensures the node is marked as initialized before it is in a state that allows code execution.
	// Any new additions to ActivateAsNode MUST come after it.
	if err := c.AdvanceState(state.IsNode, ownerID, clusterID); err != nil {
		return fmt.Errorf("advancing node state: %w", err)
	}

	// TODO: SSH keys are currently not available from the AaaS, so we can't create user SSH keys here.

	if err := c.PersistNodeState(role.Node, "", ownerID, clusterID); err != nil {
		return fmt.Errorf("persisting node state: %w", err)
	}

	if err := c.UpdateDiskPassphrase(string(diskKey)); err != nil {
		return fmt.Errorf("updating disk passphrase: %w", err)
	}

	btd := &kubeadm.BootstrapTokenDiscovery{
		APIServerEndpoint: kubeAPIendpoint,
		Token:             token,
		CACertHashes:      []string{discoveryCACertHash},
	}
	if err := c.JoinCluster(context.TODO(), btd, "", role.Node); err != nil {
		return fmt.Errorf("joining Kubernetes cluster: %w", err)
	}

	return nil
}

// SetCoordinatorActive activates the peer as a coordinator.
func (c *Core) SetCoordinatorActive() error {
	panic("not implemented")
}
@ -1,21 +0,0 @@
package core

import (
	"github.com/edgelesssys/constellation/internal/oid"
)

// QuoteValidator validates quotes.
type QuoteValidator interface {
	oid.Getter

	// Validate validates a quote and returns the user data on success.
	Validate(attDoc []byte, nonce []byte) ([]byte, error)
}

// QuoteIssuer issues quotes.
type QuoteIssuer interface {
	oid.Getter

	// Issue issues a quote for remote attestation for a given message.
	Issue(userData []byte, nonce []byte) (quote []byte, err error)
}
@ -1,84 +0,0 @@
|
||||
package core

import (
	"context"
	"errors"
	"fmt"
	"net"

	"github.com/edgelesssys/constellation/coordinator/cloudprovider/cloudtypes"
	"github.com/edgelesssys/constellation/coordinator/role"
)

var ErrUnimplemented = errors.New("unimplemented")

const (
	ConstellationUIDMetadataKey = "constellation-uid"
	coordinatorPort             = "9000"
	RoleMetadataKey             = "constellation-role"
	VPNIPMetadataKey            = "constellation-vpn-ip"
)

// ProviderMetadata implementers read/write cloud provider metadata.
type ProviderMetadata interface {
	// List retrieves all instances belonging to the current constellation.
	List(ctx context.Context) ([]cloudtypes.Instance, error)
	// Self retrieves the current instance.
	Self(ctx context.Context) (cloudtypes.Instance, error)
	// SignalRole signals the constellation role via cloud provider metadata (if supported by the CSP and deployment type, otherwise does nothing).
	SignalRole(ctx context.Context, role role.Role) error
	// SetVPNIP stores the internally used VPN IP in cloud provider metadata (if supported and required for autoscaling by the CSP, otherwise does nothing).
	SetVPNIP(ctx context.Context, vpnIP string) error
	// Supported is used to determine if metadata API is implemented for this cloud provider.
	Supported() bool
}

type ProviderMetadataFake struct{}

func (f *ProviderMetadataFake) List(ctx context.Context) ([]cloudtypes.Instance, error) {
	self, err := f.Self(ctx)
	return []cloudtypes.Instance{self}, err
}

func (f *ProviderMetadataFake) Self(ctx context.Context) (cloudtypes.Instance, error) {
	return cloudtypes.Instance{
		Name:       "instanceName",
		ProviderID: "fake://instance-id",
		Role:       role.Unknown,
		PrivateIPs: []string{"192.0.2.1"},
	}, nil
}

func (f *ProviderMetadataFake) SignalRole(ctx context.Context, role role.Role) error {
	return nil
}

func (f *ProviderMetadataFake) SetVPNIP(ctx context.Context, vpnIP string) error {
	return nil
}

func (f *ProviderMetadataFake) Supported() bool {
	return true
}

// CoordinatorEndpoints retrieves a list of constellation coordinator endpoint candidates from the cloud provider API.
func CoordinatorEndpoints(ctx context.Context, metadata ProviderMetadata) ([]string, error) {
	if !metadata.Supported() {
		return nil, errors.New("retrieving instances list from cloud provider is not yet supported")
	}
	instances, err := metadata.List(ctx)
	if err != nil {
		return nil, fmt.Errorf("retrieving instances list from cloud provider: %w", err)
	}
	coordinatorEndpoints := []string{}
	for _, instance := range instances {
		// check if role of instance is "Coordinator"
		if instance.Role == role.Coordinator {
			for _, ip := range instance.PrivateIPs {
				coordinatorEndpoints = append(coordinatorEndpoints, net.JoinHostPort(ip, coordinatorPort))
			}
		}
	}

	return coordinatorEndpoints, nil
}
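
// Editor's note: illustrative sketch, not part of the deleted file; the
// function name is hypothetical. With ProviderMetadataFake the single fake
// instance has role.Unknown, so the result is an empty (non-nil) slice; a
// real provider contributes one "<private-ip>:9000" entry per coordinator IP.
func sketchCoordinatorEndpoints(ctx context.Context) ([]string, error) {
	return CoordinatorEndpoints(ctx, &ProviderMetadataFake{})
}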
@@ -1,116 +0,0 @@
package core

import (
	"context"
	"errors"
	"testing"

	"github.com/edgelesssys/constellation/coordinator/cloudprovider/cloudtypes"
	"github.com/edgelesssys/constellation/coordinator/role"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestCoordinatorEndpoints(t *testing.T) {
	err := errors.New("some err")

	testCases := map[string]struct {
		metadata      stubMetadata
		wantErr       bool
		wantEndpoints []string
	}{
		"getting coordinator endpoints works and role is checked": {
			metadata: stubMetadata{
				listRes: []cloudtypes.Instance{
					{
						Name:       "someInstanceA",
						Role:       role.Coordinator,
						ProviderID: "provider://somePath/someInstanceA",
						PrivateIPs: []string{"192.0.2.1"},
					},
					{
						Name:       "someInstanceB",
						Role:       role.Node,
						ProviderID: "provider://somePath/someInstanceB",
						PrivateIPs: []string{"192.0.2.2"},
					},
				},
				supportedRes: true,
			},
			wantErr:       false,
			wantEndpoints: []string{"192.0.2.1:9000"},
		},
		"List fails": {
			metadata: stubMetadata{
				listErr:      err,
				supportedRes: true,
			},
			wantErr: true,
		},
		"metadata API unsupported": {
			metadata: stubMetadata{},
			wantErr:  true,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)
			require := require.New(t)

			endpoints, err := CoordinatorEndpoints(context.Background(), &tc.metadata)

			if tc.wantErr {
				assert.Error(err)
				return
			}
			require.NoError(err)

			assert.ElementsMatch(tc.wantEndpoints, endpoints)
		})
	}
}

type stubMetadata struct {
	listRes        []cloudtypes.Instance
	listErr        error
	selfRes        cloudtypes.Instance
	selfErr        error
	getInstanceRes cloudtypes.Instance
	getInstanceErr error
	signalRoleErr  error
	setVPNIPErr    error
	supportedRes   bool
}

func (m *stubMetadata) List(ctx context.Context) ([]cloudtypes.Instance, error) {
	return m.listRes, m.listErr
}

func (m *stubMetadata) Self(ctx context.Context) (cloudtypes.Instance, error) {
	return m.selfRes, m.selfErr
}

func (m *stubMetadata) GetInstance(ctx context.Context, providerID string) (cloudtypes.Instance, error) {
	return m.getInstanceRes, m.getInstanceErr
}

func (m *stubMetadata) SignalRole(ctx context.Context, role role.Role) error {
	return m.signalRoleErr
}

func (m *stubMetadata) SetVPNIP(ctx context.Context, vpnIP string) error {
	return m.setVPNIPErr
}

func (m *stubMetadata) Supported() bool {
	return m.supportedRes
}

func (m *stubMetadata) GetSubnetworkCIDR(ctx context.Context) (string, error) {
	return "", nil
}

func (m *stubMetadata) GetLoadBalancerIP(ctx context.Context) (string, error) {
	return "", nil
}
@@ -1,147 +0,0 @@
package core

import (
	"context"
	"time"

	"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
	"github.com/edgelesssys/constellation/coordinator/role"
	attestationtypes "github.com/edgelesssys/constellation/internal/attestation/types"
	"github.com/edgelesssys/constellation/internal/constants"
	"go.uber.org/zap"
	kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
)

// GetK8sJoinArgs returns the args needed by a Node to join the cluster.
func (c *Core) GetK8sJoinArgs(ctx context.Context) (*kubeadm.BootstrapTokenDiscovery, error) {
	return c.kube.GetJoinToken(ctx, constants.KubernetesJoinTokenTTL)
}

// GetK8SCertificateKey returns the key needed by a Coordinator to join the cluster.
func (c *Core) GetK8SCertificateKey(ctx context.Context) (string, error) {
	return c.kube.GetKubeadmCertificateKey(ctx)
}

// InitCluster initializes the cluster, stores the join args, and returns the kubeconfig.
func (c *Core) InitCluster(
	ctx context.Context, autoscalingNodeGroups []string, cloudServiceAccountURI string, id attestationtypes.ID, masterSecret []byte, sshUsers []*pubproto.SSHUserKey,
) ([]byte, error) {
	c.zaplogger.Info("Initializing cluster")
	vpnIP, err := c.GetVPNIP()
	if err != nil {
		c.zaplogger.Error("Retrieving vpn ip failed", zap.Error(err))
		return nil, err
	}

	// Convert SSH users from protobuf to a map.
	sshUsersMap := make(map[string]string)
	if len(sshUsers) > 0 {
		for _, value := range sshUsers {
			sshUsersMap[value.Username] = value.PublicKey
		}
	}
	if err := c.kube.InitCluster(ctx, autoscalingNodeGroups, cloudServiceAccountURI, vpnIP, id, masterSecret, sshUsersMap); err != nil {
		c.zaplogger.Error("Initializing cluster failed", zap.Error(err))
		return nil, err
	}

	kubeconfig, err := c.kube.GetKubeconfig()
	if err != nil {
		return nil, err
	}

	if err := c.data().PutKubernetesConfig(kubeconfig); err != nil {
		return nil, err
	}

	// set role in cloud provider metadata for autoconfiguration
	if c.metadata.Supported() {
		if err := c.metadata.SignalRole(context.TODO(), role.Coordinator); err != nil {
			c.zaplogger.Info("unable to update role in cloud provider metadata", zap.Error(err))
		}
	}

	return kubeconfig, nil
}

// JoinCluster lets a Node join the cluster.
func (c *Core) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTokenDiscovery, certKey string, peerRole role.Role) error {
	c.zaplogger.Info("Joining Kubernetes cluster")
	nodeVPNIP, err := c.vpn.GetInterfaceIP()
	if err != nil {
		c.zaplogger.Error("Retrieving vpn ip failed", zap.Error(err))
		return err
	}

	// we need to pass the VPNIP for another control-plane, otherwise etcd will bind itself to the wrong IP address and fail
	if err := c.kube.JoinCluster(ctx, args, nodeVPNIP, certKey, peerRole); err != nil {
		c.zaplogger.Error("Joining Kubernetes cluster failed", zap.Error(err))
		return err
	}
	c.zaplogger.Info("Joined Kubernetes cluster")
	// set role in cloud provider metadata for autoconfiguration
	if c.metadata.Supported() {
		if err := c.metadata.SignalRole(context.TODO(), peerRole); err != nil {
			c.zaplogger.Info("unable to update role in cloud provider metadata", zap.Error(err))
		}
	}

	return nil
}

// Cluster manages the overall cluster lifecycle (init, join).
type Cluster interface {
	// InitCluster bootstraps a new cluster with the current node being the master, returning the arguments required to join the cluster.
	InitCluster(
		ctx context.Context, autoscalingNodeGroups []string, cloudServiceAccountURI, vpnIP string, id attestationtypes.ID, masterSecret []byte, sshUsers map[string]string,
	) error
	// JoinCluster will join the current node to an existing cluster.
	JoinCluster(ctx context.Context, args *kubeadm.BootstrapTokenDiscovery, nodeVPNIP, certKey string, peerRole role.Role) error
	// GetKubeconfig reads the kubeconfig from the filesystem. Only succeeds after cluster is initialized.
	GetKubeconfig() ([]byte, error)
	// GetKubeadmCertificateKey returns the 64-byte hex string key needed to join the cluster as control-plane. This function must be executed on a control-plane.
	GetKubeadmCertificateKey(ctx context.Context) (string, error)
	// GetJoinToken returns a bootstrap (join) token.
	GetJoinToken(ctx context.Context, ttl time.Duration) (*kubeadm.BootstrapTokenDiscovery, error)
	// StartKubelet starts the kubelet service.
	StartKubelet() error
}

// ClusterFake behaves like a real cluster, but does not actually initialize or join Kubernetes.
type ClusterFake struct{}

// InitCluster fakes bootstrapping a new cluster with the current node being the master, returning the arguments required to join the cluster.
func (c *ClusterFake) InitCluster(
	ctx context.Context, autoscalingNodeGroups []string, cloudServiceAccountURI, vpnIP string, id attestationtypes.ID, masterSecret []byte, sshUsers map[string]string,
) error {
	return nil
}

// JoinCluster will fake joining the current node to an existing cluster.
func (c *ClusterFake) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTokenDiscovery, nodeVPNIP, certKey string, peerRole role.Role) error {
	return nil
}

// GetKubeconfig fakes reading the kubeconfig from the filesystem. Only succeeds after cluster is initialized.
func (c *ClusterFake) GetKubeconfig() ([]byte, error) {
	return []byte("kubeconfig"), nil
}

// GetKubeadmCertificateKey fakes generating a certificateKey.
func (c *ClusterFake) GetKubeadmCertificateKey(context.Context) (string, error) {
	return "controlPlaneCertificateKey", nil
}

// GetJoinToken returns a bootstrap (join) token.
func (c *ClusterFake) GetJoinToken(ctx context.Context, _ time.Duration) (*kubeadm.BootstrapTokenDiscovery, error) {
	return &kubeadm.BootstrapTokenDiscovery{
		APIServerEndpoint: "0.0.0.0",
		Token:             "kube-fake-token",
		CACertHashes:      []string{"sha256:a60ebe9b0879090edd83b40a4df4bebb20506bac1e51d518ff8f4505a721930f"},
	}, nil
}

// StartKubelet starts the kubelet service.
func (c *ClusterFake) StartKubelet() error {
	return nil
}
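
// Editor's note: illustrative sketch, not part of the deleted file. The
// compile-time assertion below is a common Go idiom for pinning a type to an
// interface; it documents that ClusterFake is a drop-in Cluster for tests.
var _ Cluster = (*ClusterFake)(nil)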
@@ -1,229 +0,0 @@
package core

import (
	"context"
	"errors"
	"testing"
	"time"

	"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
	"github.com/edgelesssys/constellation/coordinator/role"
	"github.com/edgelesssys/constellation/internal/attestation/simulator"
	attestationtypes "github.com/edgelesssys/constellation/internal/attestation/types"
	"github.com/edgelesssys/constellation/internal/deploy/user"
	"github.com/edgelesssys/constellation/internal/file"
	"github.com/spf13/afero"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"go.uber.org/zap"
	kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
)

func TestInitCluster(t *testing.T) {
	someErr := errors.New("someErr")
	kubeconfigContent := []byte("kubeconfig")

	testMS := []byte{0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8}
	testSSHUsers := make([]*pubproto.SSHUserKey, 0)
	testSSHUser := &pubproto.SSHUserKey{
		Username:  "testUser",
		PublicKey: "ssh-rsa testKey",
	}
	testSSHUsers = append(testSSHUsers, testSSHUser)

	testCases := map[string]struct {
		cluster               Cluster
		vpn                   VPN
		metadata              ProviderMetadata
		masterSecret          []byte
		autoscalingNodeGroups []string
		sshUsers              []*pubproto.SSHUserKey
		wantErr               bool
	}{
		"InitCluster works": {
			cluster: &clusterStub{
				kubeconfig: kubeconfigContent,
			},
			vpn:                   &stubVPN{interfaceIP: "192.0.2.1"},
			metadata:              &stubMetadata{supportedRes: true},
			autoscalingNodeGroups: []string{"someNodeGroup"},
		},
		"InitCluster works even if signal role fails": {
			cluster: &clusterStub{
				kubeconfig: kubeconfigContent,
			},
			vpn:                   &stubVPN{interfaceIP: "192.0.2.1"},
			metadata:              &stubMetadata{supportedRes: true, signalRoleErr: someErr},
			autoscalingNodeGroups: []string{"someNodeGroup"},
		},
		"InitCluster works with SSH and KMS": {
			cluster: &clusterStub{
				kubeconfig: kubeconfigContent,
			},
			vpn:                   &stubVPN{interfaceIP: "192.0.2.1"},
			metadata:              &stubMetadata{supportedRes: true},
			autoscalingNodeGroups: []string{"someNodeGroup"},
			masterSecret:          testMS,
			sshUsers:              testSSHUsers,
		},
		"cannot get VPN IP": {
			cluster: &clusterStub{
				kubeconfig: kubeconfigContent,
			},
			vpn:                   &stubVPN{getInterfaceIPErr: someErr},
			autoscalingNodeGroups: []string{"someNodeGroup"},
			wantErr:               true,
		},
		"cannot init kubernetes": {
			cluster: &clusterStub{
				initErr: someErr,
			},
			vpn:                   &stubVPN{interfaceIP: "192.0.2.1"},
			metadata:              &stubMetadata{supportedRes: true},
			autoscalingNodeGroups: []string{"someNodeGroup"},
			wantErr:               true,
		},
		"cannot get kubeconfig": {
			cluster: &clusterStub{
				getKubeconfigErr: someErr,
			},
			vpn:                   &stubVPN{interfaceIP: "192.0.2.1"},
			metadata:              &stubMetadata{supportedRes: true},
			autoscalingNodeGroups: []string{"someNodeGroup"},
			wantErr:               true,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)
			require := require.New(t)

			zapLogger, err := zap.NewDevelopment()
			require.NoError(err)
			fs := afero.NewMemMapFs()
			core, err := NewCore(tc.vpn, tc.cluster, tc.metadata, nil, zapLogger, simulator.OpenSimulatedTPM, nil, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
			require.NoError(err)

			id := attestationtypes.ID{Owner: []byte{0x1}, Cluster: []byte{0x2}}
			kubeconfig, err := core.InitCluster(context.Background(), tc.autoscalingNodeGroups, "cloud-service-account-uri", id, tc.masterSecret, tc.sshUsers)

			if tc.wantErr {
				assert.Error(err)
				return
			}
			require.NoError(err)
			assert.Equal(kubeconfigContent, kubeconfig)
		})
	}
}

func TestJoinCluster(t *testing.T) {
	someErr := errors.New("someErr")

	testCases := map[string]struct {
		cluster  Cluster
		metadata ProviderMetadata
		vpn      VPN
		wantErr  bool
	}{
		"JoinCluster works": {
			vpn: &stubVPN{
				interfaceIP: "192.0.2.0",
			},
			cluster:  &clusterStub{},
			metadata: &stubMetadata{supportedRes: true},
		},
		"JoinCluster works even if signal role fails": {
			vpn: &stubVPN{
				interfaceIP: "192.0.2.0",
			},
			cluster:  &clusterStub{},
			metadata: &stubMetadata{supportedRes: true, signalRoleErr: someErr},
		},
		"cannot get VPN IP": {
			vpn:      &stubVPN{getInterfaceIPErr: someErr},
			cluster:  &clusterStub{},
			metadata: &stubMetadata{supportedRes: true},
			wantErr:  true,
		},
		"joining kubernetes fails": {
			vpn: &stubVPN{
				interfaceIP: "192.0.2.0",
			},
			cluster:  &clusterStub{joinErr: someErr},
			metadata: &stubMetadata{supportedRes: true},
			wantErr:  true,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)
			require := require.New(t)

			zapLogger, err := zap.NewDevelopment()
			require.NoError(err)
			fs := afero.NewMemMapFs()
			core, err := NewCore(tc.vpn, tc.cluster, tc.metadata, nil, zapLogger, simulator.OpenSimulatedTPM, nil, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
			require.NoError(err)

			joinReq := &kubeadm.BootstrapTokenDiscovery{
				APIServerEndpoint: "192.0.2.0:6443",
				Token:             "someToken",
				CACertHashes:      []string{"someHash"},
			}
			err = core.JoinCluster(context.Background(), joinReq, "", role.Node)

			if tc.wantErr {
				assert.Error(err)
				return
			}
			require.NoError(err)
		})
	}
}

type clusterStub struct {
	initErr              error
	joinErr              error
	kubeconfig           []byte
	getKubeconfigErr     error
	getJoinTokenResponse *kubeadm.BootstrapTokenDiscovery
	getJoinTokenErr      error
	startKubeletErr      error

	inAutoscalingNodeGroups  []string
	inCloudServiceAccountURI string
	inVpnIP                  string
}

func (c *clusterStub) InitCluster(
	ctx context.Context, autoscalingNodeGroups []string, cloudServiceAccountURI string, vpnIP string, id attestationtypes.ID, masterSecret []byte, sshUsers map[string]string,
) error {
	c.inAutoscalingNodeGroups = autoscalingNodeGroups
	c.inCloudServiceAccountURI = cloudServiceAccountURI
	c.inVpnIP = vpnIP

	return c.initErr
}

func (c *clusterStub) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTokenDiscovery, nodeVPNIP string, certKey string, peerRole role.Role) error {
	return c.joinErr
}

func (c *clusterStub) GetKubeconfig() ([]byte, error) {
	return c.kubeconfig, c.getKubeconfigErr
}

func (c *clusterStub) GetKubeadmCertificateKey(context.Context) (string, error) {
	return "dummy", nil
}

func (c *clusterStub) GetJoinToken(ctx context.Context, ttl time.Duration) (*kubeadm.BootstrapTokenDiscovery, error) {
	return c.getJoinTokenResponse, c.getJoinTokenErr
}

func (c *clusterStub) StartKubelet() error {
	return c.startKubeletErr
}
@@ -1,308 +0,0 @@
package core

import (
	"context"
	"errors"
	"fmt"
	"net"
	"sync"
	"time"

	"github.com/edgelesssys/constellation/coordinator/config"
	"github.com/edgelesssys/constellation/coordinator/nodestate"
	"github.com/edgelesssys/constellation/coordinator/role"
	"github.com/edgelesssys/constellation/coordinator/state"
	"github.com/edgelesssys/constellation/coordinator/store"
	"github.com/edgelesssys/constellation/coordinator/storewrapper"
	"github.com/edgelesssys/constellation/coordinator/util"
	"github.com/edgelesssys/constellation/internal/attestation/vtpm"
	"github.com/edgelesssys/constellation/internal/deploy/user"
	"github.com/edgelesssys/constellation/internal/file"
	"github.com/edgelesssys/constellation/kms/kms"
	kmsSetup "github.com/edgelesssys/constellation/kms/setup"
	"go.uber.org/zap"
	"google.golang.org/grpc"
)

type Core struct {
	state                    state.State
	openTPM                  vtpm.TPMOpenFunc
	mut                      sync.Mutex
	store                    store.Store
	vpn                      VPN
	kube                     Cluster
	metadata                 ProviderMetadata
	encryptedDisk            EncryptedDisk
	kms                      kms.CloudKMS
	zaplogger                *zap.Logger
	persistentStoreFactory   PersistentStoreFactory
	initialVPNPeersRetriever initialVPNPeersRetriever
	lastHeartbeats           map[string]time.Time
	fileHandler              file.Handler
	linuxUserManager         user.LinuxUserManager
}

// NewCore creates and initializes a new Core object.
func NewCore(vpn VPN, kube Cluster,
	metadata ProviderMetadata, encryptedDisk EncryptedDisk, zapLogger *zap.Logger, openTPM vtpm.TPMOpenFunc, persistentStoreFactory PersistentStoreFactory, fileHandler file.Handler, linuxUserManager user.LinuxUserManager,
) (*Core, error) {
	stor := store.NewStdStore()
	c := &Core{
		openTPM:                  openTPM,
		store:                    stor,
		vpn:                      vpn,
		kube:                     kube,
		metadata:                 metadata,
		encryptedDisk:            encryptedDisk,
		zaplogger:                zapLogger,
		kms:                      nil, // KMS is set up during init phase
		persistentStoreFactory:   persistentStoreFactory,
		initialVPNPeersRetriever: getInitialVPNPeers,
		lastHeartbeats:           make(map[string]time.Time),
		fileHandler:              fileHandler,
		linuxUserManager:         linuxUserManager,
	}
	if err := c.data().IncrementPeersResourceVersion(); err != nil {
		return nil, err
	}

	return c, nil
}

// GetVPNPubKey returns the peer's VPN public key.
func (c *Core) GetVPNPubKey() ([]byte, error) {
	return c.vpn.GetPublicKey()
}

// InitializeStoreIPs initializes the store's VPN IP pools.
func (c *Core) InitializeStoreIPs() error {
	return c.data().InitializeStoreIPs()
}

// SetVPNIP sets the peer's VPN IP.
func (c *Core) SetVPNIP(ip string) error {
	return c.vpn.SetInterfaceIP(ip)
}

// GetVPNIP returns the core's VPN IP.
func (c *Core) GetVPNIP() (string, error) {
	return c.vpn.GetInterfaceIP()
}

// GetNextNodeIP gets the next free node IP address.
func (c *Core) GetNextNodeIP() (string, error) {
	tx, err := c.store.BeginTransaction()
	if err != nil {
		return "", err
	}
	txwrapper := storewrapper.StoreWrapper{Store: tx}
	ip, err := txwrapper.PopNextFreeNodeIP()
	if err != nil {
		return "", err
	}
	return ip.String(), tx.Commit()
}

// GetNextCoordinatorIP gets the next free coordinator IP address.
func (c *Core) GetNextCoordinatorIP() (string, error) {
	tx, err := c.store.BeginTransaction()
	if err != nil {
		return "", err
	}
	txwrapper := storewrapper.StoreWrapper{Store: tx}
	ip, err := txwrapper.PopNextFreeCoordinatorIP()
	if err != nil {
		return "", err
	}
	return ip.String(), tx.Commit()
}

// SwitchToPersistentStore creates a new store using the persistentStoreFactory and transfers the initial temporary store into it.
func (c *Core) SwitchToPersistentStore() error {
	newStore, err := c.persistentStoreFactory.New()
	if err != nil {
		c.zaplogger.Error("error creating persistent store")
		return err
	}
	if err := c.store.Transfer(newStore); err != nil {
		c.zaplogger.Error("transfer to persistent store failed")
		return err
	}
	c.store = newStore
	c.zaplogger.Info("Transition to persistent store successful")
	return nil
}

// GetIDs returns the ownerID and clusterID.
// Pass a masterSecret to generate new IDs.
// Pass nil to obtain the existing IDs.
func (c *Core) GetIDs(masterSecret []byte) (ownerID, clusterID []byte, err error) {
	if masterSecret == nil {
		clusterID, err = c.data().GetClusterID()
		if err != nil {
			return nil, nil, err
		}
		masterSecret, err = c.data().GetMasterSecret()
		if err != nil {
			return nil, nil, err
		}
	} else {
		clusterID, err = util.GenerateRandomBytes(config.RNGLengthDefault)
		if err != nil {
			return nil, nil, err
		}
		if err := c.data().PutMasterSecret(masterSecret); err != nil {
			return nil, nil, err
		}
	}

	// TODO: Choose a way to salt ownerID
	ownerID, err = deriveOwnerID(masterSecret)
	if err != nil {
		return nil, nil, err
	}
	return ownerID, clusterID, nil
}

// NotifyNodeHeartbeat notifies the core of a received heartbeat from a node.
func (c *Core) NotifyNodeHeartbeat(addr net.Addr) {
	ip := addr.String()
	now := time.Now()
	c.mut.Lock()
	c.lastHeartbeats[ip] = now
	c.mut.Unlock()
}

// Initialize initializes the state machine of the core and handles re-joining the VPN.
// Blocks until the core is ready to be used.
func (c *Core) Initialize(ctx context.Context, dialer Dialer, api PubAPI) (nodeActivated bool, err error) {
	nodeActivated, err = vtpm.IsNodeInitialized(c.openTPM)
	if err != nil {
		return false, fmt.Errorf("checking for previous activation using vTPM: %w", err)
	}
	if !nodeActivated {
		c.zaplogger.Info("Node was never activated. Allowing node to be activated.")
		if err := c.vpn.Setup(nil); err != nil {
			return false, fmt.Errorf("VPN setup: %w", err)
		}
		c.state.Advance(state.AcceptingInit)
		return false, nil
	}
	c.zaplogger.Info("Node was previously activated. Attempting re-join.")
	nodeState, err := nodestate.FromFile(c.fileHandler)
	if err != nil {
		return false, fmt.Errorf("reading node state: %w", err)
	}
	if err := c.vpn.Setup(nodeState.VPNPrivKey); err != nil {
		return false, fmt.Errorf("VPN setup: %w", err)
	}

	// restart kubernetes
	if err := c.kube.StartKubelet(); err != nil {
		return false, fmt.Errorf("starting kubelet service: %w", err)
	}

	var initialState state.State
	switch nodeState.Role {
	case role.Coordinator:
		initialState = state.ActivatingNodes
		err = c.ReinitializeAsCoordinator(ctx, dialer, nodeState.VPNIP, api, retrieveInitialVPNPeersRetryBackoff)
	case role.Node:
		initialState = state.IsNode
		err = c.ReinitializeAsNode(ctx, dialer, nodeState.VPNIP, api, retrieveInitialVPNPeersRetryBackoff)
	default:
		return false, fmt.Errorf("invalid node role for initialized node: %v", nodeState.Role)
	}
	if err != nil {
		return false, fmt.Errorf("reinit: %w", err)
	}
	c.zaplogger.Info("Re-join successful.")

	c.state.Advance(initialState)
	return nodeActivated, nil
}

// PersistNodeState persists node state to disk.
func (c *Core) PersistNodeState(role role.Role, vpnIP string, ownerID []byte, clusterID []byte) error {
	vpnPrivKey, err := c.vpn.GetPrivateKey()
	if err != nil {
		return fmt.Errorf("retrieving VPN private key: %w", err)
	}
	nodeState := nodestate.NodeState{
		Role:       role,
		VPNIP:      vpnIP,
		VPNPrivKey: vpnPrivKey,
		OwnerID:    ownerID,
		ClusterID:  clusterID,
	}
	return nodeState.ToFile(c.fileHandler)
}

// SetUpKMS sets the Coordinator's key management service and key encryption key ID.
// Creates a new key encryption key in the KMS, if requested.
// Otherwise the KEK is assumed to already exist in the KMS.
func (c *Core) SetUpKMS(ctx context.Context, storageURI, kmsURI, kekID string, useExistingKEK bool) error {
	kms, err := kmsSetup.SetUpKMS(ctx, storageURI, kmsURI)
	if err != nil {
		return err
	}
	c.kms = kms

	if useExistingKEK {
		return nil
	}
	// import Constellation master secret as key encryption key
	kek, err := c.data().GetMasterSecret()
	if err != nil {
		return err
	}
	if err := kms.CreateKEK(ctx, kekID, kek); err != nil {
		return err
	}
	if err := c.data().PutKEKID(kekID); err != nil {
		return err
	}
	bundledKMSInfo := kmsSetup.KMSInformation{KmsUri: kmsURI, KeyEncryptionKeyID: kekID, StorageUri: storageURI}
	if err := c.data().PutKMSData(bundledKMSInfo); err != nil {
		return err
	}
	return nil
}

func (c *Core) GetKMSInfo() (kmsSetup.KMSInformation, error) {
	return c.data().GetKMSData()
}

// GetDataKey derives a key of the given length from the Constellation's master secret.
func (c *Core) GetDataKey(ctx context.Context, keyID string, length int) ([]byte, error) {
	if c.kms == nil {
		c.zaplogger.Error("trying to request data key before KMS is set up")
		return nil, errors.New("trying to request data key before KMS is set up")
	}

	kekID, err := c.data().GetKEKID()
	if err != nil {
		return nil, err
	}

	return c.kms.GetDEK(ctx, kekID, keyID, length)
}

func (c *Core) data() storewrapper.StoreWrapper {
	return storewrapper.StoreWrapper{Store: c.store}
}

type PersistentStoreFactory interface {
	New() (store.Store, error)
}

// deriveOwnerID uses the Constellation's master secret to derive a unique value tied to that secret.
func deriveOwnerID(masterSecret []byte) ([]byte, error) {
	// TODO: Choose a way to salt the key derivation
	return util.DeriveKey(masterSecret, []byte("Constellation"), []byte("id"), config.RNGLengthDefault)
}

// Dialer can open grpc client connections with different levels of ATLS encryption / verification.
type Dialer interface {
	Dial(ctx context.Context, target string) (*grpc.ClientConn, error)
}
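
// Editor's note: illustrative sketch, not part of the deleted file; the
// function name is hypothetical. deriveOwnerID is a pure key derivation, so
// the same master secret always maps to the same ownerID, which is what lets
// GetIDs recompute the ID instead of having to store it.
func sketchOwnerIDIsDeterministic(masterSecret []byte) (bool, error) {
	a, err := deriveOwnerID(masterSecret)
	if err != nil {
		return false, err
	}
	b, err := deriveOwnerID(masterSecret)
	if err != nil {
		return false, err
	}
	return string(a) == string(b), nil // expected to be true
}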
@@ -1,340 +0,0 @@
package core

import (
	"context"
	"errors"
	"net"
	"testing"

	"github.com/edgelesssys/constellation/coordinator/nodestate"
	"github.com/edgelesssys/constellation/coordinator/peer"
	"github.com/edgelesssys/constellation/coordinator/role"
	"github.com/edgelesssys/constellation/coordinator/state"
	"github.com/edgelesssys/constellation/coordinator/store"
	"github.com/edgelesssys/constellation/internal/atls"
	"github.com/edgelesssys/constellation/internal/attestation/simulator"
	"github.com/edgelesssys/constellation/internal/attestation/vtpm"
	"github.com/edgelesssys/constellation/internal/deploy/user"
	"github.com/edgelesssys/constellation/internal/file"
	"github.com/edgelesssys/constellation/internal/grpc/dialer"
	"github.com/edgelesssys/constellation/internal/grpc/testdialer"
	"github.com/edgelesssys/constellation/internal/oid"
	kms "github.com/edgelesssys/constellation/kms/setup"
	"github.com/spf13/afero"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"go.uber.org/goleak"
	"go.uber.org/zap"
	"go.uber.org/zap/zaptest"
)

func TestMain(m *testing.M) {
	goleak.VerifyTestMain(m,
		// https://github.com/census-instrumentation/opencensus-go/issues/1262
		goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"),
	)
}

func TestGetNextNodeIP(t *testing.T) {
	assert := assert.New(t)
	require := require.New(t)

	fs := afero.NewMemMapFs()
	core, err := NewCore(&stubVPN{}, nil, nil, nil, zaptest.NewLogger(t), nil, nil, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
	require.NoError(err)
	require.NoError(core.InitializeStoreIPs())

	ip, err := core.GetNextNodeIP()
	assert.NoError(err)
	assert.Equal("10.118.0.11", ip)

	ip, err = core.GetNextNodeIP()
	assert.NoError(err)
	assert.Equal("10.118.0.12", ip)

	ip, err = core.GetNextNodeIP()
	assert.NoError(err)
	assert.Equal("10.118.0.13", ip)

	require.NoError(core.data().PutFreedNodeVPNIP("10.118.0.12"))
	require.NoError(core.data().PutFreedNodeVPNIP("10.118.0.13"))
	ipsInStore := map[string]struct{}{
		"10.118.0.13": {},
		"10.118.0.12": {},
	}

	ip, err = core.GetNextNodeIP()
	assert.NoError(err)
	assert.Contains(ipsInStore, ip)
	delete(ipsInStore, ip)

	ip, err = core.GetNextNodeIP()
	assert.NoError(err)
	assert.Contains(ipsInStore, ip)
	delete(ipsInStore, ip)

	ip, err = core.GetNextNodeIP()
	assert.NoError(err)
	assert.Equal("10.118.0.14", ip)
}

func TestSwitchToPersistentStore(t *testing.T) {
	assert := assert.New(t)
	require := require.New(t)

	storeFactory := &fakeStoreFactory{}
	fs := afero.NewMemMapFs()
	core, err := NewCore(&stubVPN{}, nil, nil, nil, zaptest.NewLogger(t), nil, storeFactory, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
	require.NoError(core.store.Put("test", []byte("test")))
	require.NoError(err)

	require.NoError(core.SwitchToPersistentStore())
	value, err := core.store.Get("test")
	assert.NoError(err)
	assert.Equal("test", string(value))
}

func TestGetIDs(t *testing.T) {
	assert := assert.New(t)
	require := require.New(t)

	fs := afero.NewMemMapFs()
	core, err := NewCore(&stubVPN{}, nil, nil, nil, zaptest.NewLogger(t), nil, nil, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
	require.NoError(err)

	_, _, err = core.GetIDs(nil)
	assert.Error(err)

	masterSecret := []byte{2, 3, 4}
	ownerID1, clusterID1, err := core.GetIDs(masterSecret)
	require.NoError(err)
	require.NotEmpty(ownerID1)
	require.NotEmpty(clusterID1)

	require.NoError(core.data().PutClusterID(clusterID1))

	ownerID2, clusterID2, err := core.GetIDs(nil)
	require.NoError(err)
	assert.Equal(ownerID1, ownerID2)
	assert.Equal(clusterID1, clusterID2)
}

func TestNotifyNodeHeartbeat(t *testing.T) {
	assert := assert.New(t)
	require := require.New(t)

	fs := afero.NewMemMapFs()
	core, err := NewCore(&stubVPN{}, nil, nil, nil, zaptest.NewLogger(t), nil, nil, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
	require.NoError(err)

	const ip = "192.0.2.1"
	assert.Empty(core.lastHeartbeats)
	core.NotifyNodeHeartbeat(&net.IPAddr{IP: net.ParseIP(ip)})
	assert.Contains(core.lastHeartbeats, ip)
}

func TestDeriveKey(t *testing.T) {
	assert := assert.New(t)
	require := require.New(t)

	fs := afero.NewMemMapFs()
	core, err := NewCore(&stubVPN{}, nil, nil, nil, zaptest.NewLogger(t), nil, nil, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
	require.NoError(err)

	// error when no kms is set up
	_, err = core.GetDataKey(context.Background(), "key-1", 32)
	assert.Error(err)

	kms := &fakeKMS{}
	core.kms = kms

	require.NoError(core.store.Put("kekID", []byte("master-key")))

	// error when no master secret is set
	_, err = core.GetDataKey(context.Background(), "key-1", 32)
	assert.Error(err)
	err = core.kms.CreateKEK(context.Background(), "master-key", []byte("Constellation"))
	require.NoError(err)

	key, err := core.GetDataKey(context.Background(), "key-1", 32)
	assert.NoError(err)
	assert.Equal(kms.dek, key)

	kms.getDEKErr = errors.New("error")
	_, err = core.GetDataKey(context.Background(), "key-1", 32)
	assert.Error(err)
}

func TestInitialize(t *testing.T) {
	testCases := map[string]struct {
		initializePCRs bool
		writeNodeState bool
		role           role.Role
		wantActivated  bool
		wantState      state.State
		wantErr        bool
	}{
		"fresh node": {
			wantState: state.AcceptingInit,
		},
		"activated coordinator": {
			initializePCRs: true,
			writeNodeState: true,
			role:           role.Coordinator,
			wantActivated:  true,
			wantState:      state.ActivatingNodes,
		},
		"activated node": {
			initializePCRs: true,
			writeNodeState: true,
			role:           role.Node,
			wantActivated:  true,
			wantState:      state.IsNode,
		},
		"activated node with no node state": {
			initializePCRs: true,
			writeNodeState: false,
			wantErr:        true,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)
			require := require.New(t)

			openTPM, simulatedTPMCloser := simulator.NewSimulatedTPMOpenFunc()
			defer simulatedTPMCloser.Close()
			if tc.initializePCRs {
				require.NoError(vtpm.MarkNodeAsInitialized(openTPM, []byte{0x0, 0x1, 0x2, 0x3}, []byte{0x4, 0x5, 0x6, 0x7}))
			}
			fs := afero.NewMemMapFs()
			fileHandler := file.NewHandler(fs)
			if tc.writeNodeState {
				require.NoError((&nodestate.NodeState{
					Role:       tc.role,
					VPNPrivKey: []byte{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7},
				}).ToFile(fileHandler))
			}
			core, err := NewCore(&stubVPN{}, &clusterStub{}, &ProviderMetadataFake{}, nil, zaptest.NewLogger(t), openTPM, &fakeStoreFactory{}, fileHandler, user.NewLinuxUserManagerFake(fs))
			require.NoError(err)
			core.initialVPNPeersRetriever = fakeInitializeVPNPeersRetriever
			// prepare store to emulate initialized KMS
			require.NoError(core.data().PutKMSData(kms.KMSInformation{StorageUri: kms.NoStoreURI, KmsUri: kms.ClusterKMSURI}))
			require.NoError(core.data().PutMasterSecret([]byte("master-secret")))
			dialer := dialer.New(nil, atls.NewFakeValidator(oid.Dummy{}), testdialer.NewBufconnDialer())

			nodeActivated, err := core.Initialize(context.Background(), dialer, &stubPubAPI{})
			if tc.wantErr {
				assert.Error(err)
				return
			}
			require.NoError(err)
			assert.Equal(tc.wantActivated, nodeActivated)
			assert.Equal(tc.wantState, core.state)
		})
	}
}

func TestPersistNodeState(t *testing.T) {
	testCases := map[string]struct {
		vpn            VPN
		touchStateFile bool
		wantErr        bool
	}{
		"persisting works": {
			vpn: &stubVPN{
				privateKey: []byte("private-key"),
			},
		},
		"retrieving VPN key fails": {
			vpn: &stubVPN{
				getPrivateKeyErr: errors.New("error"),
			},
			wantErr: true,
		},
		"writing node state over existing file fails": {
			vpn: &stubVPN{
				privateKey: []byte("private-key"),
			},
			touchStateFile: true,
			wantErr:        true,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)
			require := require.New(t)

			fs := afero.NewMemMapFs()
			fileHandler := file.NewHandler(fs)
			if tc.touchStateFile {
				file, err := fs.Create("/run/state/constellation/node_state.json")
				require.NoError(err)
				require.NoError(file.Close())
			}
			core, err := NewCore(tc.vpn, nil, nil, nil, zaptest.NewLogger(t), nil, nil, fileHandler, user.NewLinuxUserManagerFake(fs))
			require.NoError(err)
			err = core.PersistNodeState(role.Coordinator, "192.0.2.1", []byte("owner-id"), []byte("cluster-id"))
			if tc.wantErr {
				assert.Error(err)
				return
			}
			require.NoError(err)
			nodeState, err := nodestate.FromFile(fileHandler)
			assert.NoError(err)
			assert.Equal(nodestate.NodeState{
				Role:       role.Coordinator,
				VPNIP:      "192.0.2.1",
				VPNPrivKey: []byte("private-key"),
				OwnerID:    []byte("owner-id"),
				ClusterID:  []byte("cluster-id"),
			}, *nodeState)
		})
	}
}

type fakeStoreFactory struct {
	store store.Store
}

func (f *fakeStoreFactory) New() (store.Store, error) {
	f.store = store.NewStdStore()
	return f.store, nil
}

type fakeKMS struct {
	kek       []byte
	dek       []byte
	getDEKErr error
}

func (k *fakeKMS) CreateKEK(ctx context.Context, keyID string, key []byte) error {
	k.kek = []byte(keyID)
	return nil
}

func (k *fakeKMS) GetDEK(ctx context.Context, kekID, keyID string, length int) ([]byte, error) {
	if k.getDEKErr != nil {
		return nil, k.getDEKErr
	}
	if k.kek == nil {
		return nil, errors.New("error")
	}
	return k.dek, nil
}

type stubPubAPI struct {
	startVPNAPIErr error
}

func (p *stubPubAPI) StartVPNAPIServer(vpnIP string) error {
	return p.startVPNAPIErr
}

func (p *stubPubAPI) StartUpdateLoop() {}

func fakeInitializeVPNPeersRetriever(ctx context.Context, dialer Dialer, logger *zap.Logger, metadata ProviderMetadata, ownCoordinatorEndpoint *string) ([]peer.Peer, error) {
	return []peer.Peer{}, nil
}
@@ -1,58 +0,0 @@
package core

import (
	"fmt"
	"strings"
)

// GetDiskUUID gets the disk's UUID.
func (c *Core) GetDiskUUID() (string, error) {
	if err := c.encryptedDisk.Open(); err != nil {
		return "", fmt.Errorf("retrieving uuid of encrypted disk: cannot open disk: %w", err)
	}
	defer c.encryptedDisk.Close()
	uuid, err := c.encryptedDisk.UUID()
	if err != nil {
		return "", fmt.Errorf("cannot retrieve uuid of disk: %w", err)
	}
	return strings.ToLower(uuid), nil
}

// UpdateDiskPassphrase switches the initial random passphrase of the encrypted disk to a permanent passphrase.
func (c *Core) UpdateDiskPassphrase(passphrase string) error {
	if err := c.encryptedDisk.Open(); err != nil {
		return fmt.Errorf("updating passphrase of encrypted disk: cannot open disk: %w", err)
	}
	defer c.encryptedDisk.Close()
	return c.encryptedDisk.UpdatePassphrase(passphrase)
}

// EncryptedDisk manages the encrypted state disk.
type EncryptedDisk interface {
	// Open prepares the underlying device for disk operations.
	Open() error
	// Close closes the underlying device.
	Close() error
	// UUID gets the device's UUID.
	UUID() (string, error)
	// UpdatePassphrase switches the initial random passphrase of the encrypted disk to a permanent passphrase.
	UpdatePassphrase(passphrase string) error
}

type EncryptedDiskFake struct{}

func (f *EncryptedDiskFake) UUID() (string, error) {
	return "fake-disk-uuid", nil
}

func (f *EncryptedDiskFake) UpdatePassphrase(passphrase string) error {
	return nil
}

func (f *EncryptedDiskFake) Open() error {
	return nil
}

func (f *EncryptedDiskFake) Close() error {
	return nil
}
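
// Editor's note: illustrative sketch, not part of the deleted file; the
// function name is hypothetical. It mirrors the Open/defer-Close pattern that
// Core.GetDiskUUID uses above and works against any EncryptedDisk, including
// EncryptedDiskFake (which reports "fake-disk-uuid").
func sketchReadDiskUUID(disk EncryptedDisk) (string, error) {
	if err := disk.Open(); err != nil {
		return "", err
	}
	defer disk.Close()
	return disk.UUID()
}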
@@ -1,125 +0,0 @@
package core

import (
	"errors"
	"testing"

	"github.com/edgelesssys/constellation/internal/deploy/user"
	"github.com/edgelesssys/constellation/internal/file"
	"github.com/spf13/afero"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"go.uber.org/zap"
)

func TestGetDiskUUID(t *testing.T) {
	testCases := map[string]struct {
		wantUUID string
		openErr  error
		uuidErr  error
		wantErr  bool
	}{
		"getting uuid works": {
			wantUUID: "uuid",
		},
		"open can fail": {
			openErr: errors.New("open-error"),
			wantErr: true,
		},
		"getting disk uuid can fail": {
			uuidErr: errors.New("uuid-err"),
			wantErr: true,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)
			require := require.New(t)

			zapLogger, err := zap.NewDevelopment()
			require.NoError(err)
			diskStub := encryptedDiskStub{
				openErr: tc.openErr,
				uuidErr: tc.uuidErr,
				uuid:    tc.wantUUID,
			}
			fs := afero.NewMemMapFs()
			core, err := NewCore(&stubVPN{}, nil, nil, &diskStub, zapLogger, nil, nil, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
			require.NoError(err)
			uuid, err := core.GetDiskUUID()
			if tc.wantErr {
				assert.Error(err)
				return
			}
			require.NoError(err)

			assert.Equal(tc.wantUUID, uuid)
		})
	}
}

func TestUpdateDiskPassphrase(t *testing.T) {
	testCases := map[string]struct {
		openErr             error
		updatePassphraseErr error
		wantErr             bool
	}{
		"updating passphrase works": {},
		"open can fail": {
			openErr: errors.New("open-error"),
			wantErr: true,
		},
		"updating disk passphrase can fail": {
			updatePassphraseErr: errors.New("update-err"),
			wantErr:             true,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)
			require := require.New(t)

			zapLogger, err := zap.NewDevelopment()
			require.NoError(err)
			diskStub := encryptedDiskStub{
				openErr:             tc.openErr,
				updatePassphraseErr: tc.updatePassphraseErr,
			}
			fs := afero.NewMemMapFs()
			core, err := NewCore(&stubVPN{}, nil, nil, &diskStub, zapLogger, nil, nil, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
			require.NoError(err)
			err = core.UpdateDiskPassphrase("passphrase")
			if tc.wantErr {
				assert.Error(err)
				return
			}
			require.NoError(err)
		})
	}
}

type encryptedDiskStub struct {
	openErr             error
	closeErr            error
	uuid                string
	uuidErr             error
	updatePassphraseErr error
}

func (s *encryptedDiskStub) UUID() (string, error) {
	return s.uuid, s.uuidErr
}

func (s *encryptedDiskStub) UpdatePassphrase(passphrase string) error {
	return s.updatePassphraseErr
}

func (s *encryptedDiskStub) Open() error {
	return s.openErr
}

func (s *encryptedDiskStub) Close() error {
	return s.closeErr
}
@@ -1,234 +0,0 @@
package core

import (
	"bytes"
	"context"
	"fmt"
	"log"
	"net"
	"sync"
	"testing"

	"github.com/edgelesssys/constellation/coordinator/logging"
	"github.com/edgelesssys/constellation/coordinator/pubapi"
	"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
	"github.com/edgelesssys/constellation/coordinator/state"
	"github.com/edgelesssys/constellation/coordinator/vpnapi"
	"github.com/edgelesssys/constellation/coordinator/vpnapi/vpnproto"
	"github.com/edgelesssys/constellation/internal/atls"
	"github.com/edgelesssys/constellation/internal/attestation/simulator"
	"github.com/edgelesssys/constellation/internal/deploy/user"
	"github.com/edgelesssys/constellation/internal/file"
	"github.com/edgelesssys/constellation/internal/grpc/atlscredentials"
	"github.com/edgelesssys/constellation/internal/grpc/dialer"
	"github.com/edgelesssys/constellation/internal/oid"
	kms "github.com/edgelesssys/constellation/kms/setup"
	"github.com/spf13/afero"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"go.uber.org/zap"
	"google.golang.org/grpc"
	"google.golang.org/grpc/test/bufconn"
)

// DEPRECATED test. Don't extend this one; extend others or write a new one.
// TODO: remove as soon as major changes to this test would be needed.
func TestLegacyActivateCoordinator(t *testing.T) {
	assert := assert.New(t)
	require := require.New(t)

	adminVPNKey := []byte{2, 3, 4}

	bufDialer := newBufconnDialer()

	nodeCore1, nodeAPI1, err := newMockCoreWithDialer(bufDialer)
	require.NoError(err)
	defer nodeAPI1.Close()
	_, nodeAPI2, err := newMockCoreWithDialer(bufDialer)
	require.NoError(err)
	defer nodeAPI2.Close()
	_, nodeAPI3, err := newMockCoreWithDialer(bufDialer)
	require.NoError(err)
	defer nodeAPI3.Close()

	nodeIPs := []string{"192.0.2.11", "192.0.2.12", "192.0.2.13"}
	coordinatorIP := "192.0.2.1"
	bindPort := "9000"
	nodeServer1, err := spawnNode(net.JoinHostPort(nodeIPs[0], bindPort), nodeAPI1, bufDialer)
	require.NoError(err)
	defer nodeServer1.GracefulStop()
	nodeServer2, err := spawnNode(net.JoinHostPort(nodeIPs[1], bindPort), nodeAPI2, bufDialer)
	require.NoError(err)
	defer nodeServer2.GracefulStop()
	nodeServer3, err := spawnNode(net.JoinHostPort(nodeIPs[2], bindPort), nodeAPI3, bufDialer)
	require.NoError(err)
	defer nodeServer3.GracefulStop()

	coordinatorCore, coordinatorAPI, err := newMockCoreWithDialer(bufDialer)
	require.NoError(err)
	require.NoError(coordinatorCore.SetVPNIP("10.118.0.1"))
	defer coordinatorAPI.Close()
	coordinatorServer, err := spawnNode(net.JoinHostPort(coordinatorIP, bindPort), coordinatorAPI, bufDialer)
	require.NoError(err)
	defer coordinatorServer.GracefulStop()

	// activate coordinator
	activationReq := &pubproto.ActivateAsCoordinatorRequest{
		AdminVpnPubKey: adminVPNKey,
		NodePublicIps:  nodeIPs,
		MasterSecret:   []byte("Constellation"),
		KmsUri:         kms.ClusterKMSURI,
		StorageUri:     kms.NoStoreURI,
	}
	testActivationSvr := &stubAVPNActivateCoordinatorServer{}
	assert.NoError(coordinatorAPI.ActivateAsCoordinator(activationReq, testActivationSvr))

	// Coordinator streams admin conf
	require.NotEmpty(testActivationSvr.sent)
	adminConfig := testActivationSvr.sent[len(testActivationSvr.sent)-1].GetAdminConfig()
	require.NotNil(adminConfig)
	assert.NotEmpty(adminConfig.AdminVpnIp)
	assert.NotNil(adminConfig.Kubeconfig)
	require.NotNil(testActivationSvr.sent[0])
	require.NotNil(testActivationSvr.sent[0].GetLog())
	assert.NotEmpty(testActivationSvr.sent[0].GetLog().Message)

	// Coordinator cannot be activated a second time
	assert.Error(coordinatorAPI.ActivateAsCoordinator(activationReq, testActivationSvr))

	// Assert Coordinator
	peers := coordinatorCore.vpn.(*stubVPN).peers
	assert.Less(3, len(peers))
	// coordinator peers contain admin
	found := false
	for _, peer := range peers {
		if bytes.Equal(adminVPNKey, peer.pubKey) {
			found = true
			break
		}
	}
	assert.True(found)

	// Assert Node
	peers = nodeCore1.vpn.(*stubVPN).peers
	assert.Less(0, len(peers))
	assert.NotEmpty(peers[0].publicIP)
}

// newMockCoreWithDialer creates a new core object with attestation mock and provided dialer for testing.
func newMockCoreWithDialer(bufDialer *bufconnDialer) (*Core, *pubapi.API, error) {
	zapLogger, err := zap.NewDevelopment()
	if err != nil {
		return nil, nil, err
	}

	dialer := dialer.New(nil, atls.NewFakeValidator(oid.Dummy{}), bufDialer)
	vpn := &stubVPN{}
	kubeFake := &ClusterFake{}
	metadataFake := &ProviderMetadataFake{}
	encryptedDiskFake := &EncryptedDiskFake{}

	getPublicAddr := func() (string, error) {
		return "192.0.2.1", nil
	}
	fs := afero.NewMemMapFs()
	core, err := NewCore(vpn, kubeFake, metadataFake, encryptedDiskFake, zapLogger, simulator.OpenSimulatedTPM, &fakeStoreFactory{}, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
	if err != nil {
		return nil, nil, err
	}
	if err := core.AdvanceState(state.AcceptingInit, nil, nil); err != nil {
		return nil, nil, err
	}

	vapiServer := &fakeVPNAPIServer{logger: zapLogger, core: core, dialer: bufDialer}
	papi := pubapi.New(zapLogger, &logging.NopLogger{}, core, dialer, vapiServer, getPublicAddr, nil)

	return core, papi, nil
}

type bufconnDialer struct {
	mut       sync.Mutex
	listeners map[string]*bufconn.Listener
}

func newBufconnDialer() *bufconnDialer {
	return &bufconnDialer{listeners: make(map[string]*bufconn.Listener)}
}

func (b *bufconnDialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
	b.mut.Lock()
	listener, ok := b.listeners[address]
	b.mut.Unlock()
	if !ok {
		return nil, fmt.Errorf("could not connect to server on %v", address)
	}
	return listener.DialContext(ctx)
}

func (b *bufconnDialer) addListener(endpoint string, listener *bufconn.Listener) {
	b.mut.Lock()
	b.listeners[endpoint] = listener
	b.mut.Unlock()
}

func spawnNode(endpoint string, testNodeCore *pubapi.API, bufDialer *bufconnDialer) (*grpc.Server, error) {
	creds := atlscredentials.New(atls.NewFakeIssuer(oid.Dummy{}), nil)

	grpcServer := grpc.NewServer(grpc.Creds(creds))
	pubproto.RegisterAPIServer(grpcServer, testNodeCore)

	const bufferSize = 8 * 1024
	listener := bufconn.Listen(bufferSize)
	bufDialer.addListener(endpoint, listener)

	log.Printf("bufconn server listening at %v", endpoint)

	go func() {
		if err := grpcServer.Serve(listener); err != nil {
			log.Fatalf("failed to serve: %v", err)
		}
	}()

	return grpcServer, nil
}

type stubAVPNActivateCoordinatorServer struct {
	grpc.ServerStream

	sendErr error

	sent []*pubproto.ActivateAsCoordinatorResponse
}

func (s *stubAVPNActivateCoordinatorServer) Send(req *pubproto.ActivateAsCoordinatorResponse) error {
	s.sent = append(s.sent, req)
	return s.sendErr
}

type fakeVPNAPIServer struct {
	logger   *zap.Logger
	core     vpnapi.Core
	dialer   *bufconnDialer
	listener net.Listener
	server   *grpc.Server
}

func (v *fakeVPNAPIServer) Listen(endpoint string) error {
	api := vpnapi.New(v.logger, v.core)
	v.server = grpc.NewServer()
	vpnproto.RegisterAPIServer(v.server, api)
	listener := bufconn.Listen(1024)
	v.dialer.addListener(endpoint, listener)
	v.listener = listener
	return nil
}

func (v *fakeVPNAPIServer) Serve() error {
	return v.server.Serve(v.listener)
}

func (v *fakeVPNAPIServer) Close() {
	if v.server != nil {
		v.server.GracefulStop()
	}
}
|
@ -1,89 +0,0 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"github.com/edgelesssys/constellation/coordinator/peer"
|
||||
"github.com/edgelesssys/constellation/coordinator/storewrapper"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// GetPeers returns the stored peers if the requested version differs from the stored version.
|
||||
// Peers include all VPN devices, namely Coordinators, Nodes, and Admins.
|
||||
func (c *Core) GetPeers(resourceVersion int) (int, []peer.Peer, error) {
|
||||
// Most often there's nothing to do, so first check without an expensive transaction.
|
||||
curVer, err := c.data().GetPeersResourceVersion()
|
||||
if err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
if curVer == resourceVersion {
|
||||
return curVer, nil, nil
|
||||
}
|
||||
|
||||
tx, err := c.store.BeginTransaction()
|
||||
if err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
defer tx.Rollback()
|
||||
txdata := storewrapper.StoreWrapper{Store: tx}
|
||||
|
||||
txVer, err := txdata.GetPeersResourceVersion()
|
||||
if err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
peers, err := txdata.GetPeers()
|
||||
if err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
return txVer, peers, nil
|
||||
}
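// Illustrative sketch (not part of the original file): a caller can use the
// resource version returned by GetPeers to poll cheaply and only apply VPN
// updates when the store actually changed. The helper name syncPeers is
// hypothetical.
func syncPeers(c *Core, knownVersion int) (int, error) {
	newVersion, peers, err := c.GetPeers(knownVersion)
	if err != nil {
		return knownVersion, err
	}
	if newVersion != knownVersion {
		if err := c.UpdatePeers(peers); err != nil {
			return knownVersion, err
		}
	}
	return newVersion, nil
}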
|
||||
|
||||
// AddPeer adds a peer to the store and the VPN.
|
||||
func (c *Core) AddPeer(peer peer.Peer) error {
|
||||
if err := c.AddPeerToVPN(peer); err != nil {
|
||||
return err
|
||||
}
|
||||
return c.AddPeerToStore(peer)
|
||||
}
|
||||
|
||||
// AddPeerToVPN adds a peer to the VPN.
|
||||
func (c *Core) AddPeerToVPN(peer peer.Peer) error {
|
||||
// don't add myself to vpn
|
||||
myIP, err := c.vpn.GetInterfaceIP()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if myIP != peer.VPNIP {
|
||||
if err := c.vpn.AddPeer(peer.VPNPubKey, peer.PublicIP, peer.VPNIP); err != nil {
|
||||
c.zaplogger.Error("failed to add peer to VPN", zap.Error(err), zap.String("peer public_ip", peer.PublicIP), zap.String("peer vpn_ip", peer.VPNIP))
|
||||
return err
|
||||
}
|
||||
c.zaplogger.Info("added peer to VPN", zap.String("role", peer.Role.String()), zap.String("coordinator public_ip", peer.PublicIP), zap.String("coordinator vpn_ip", peer.VPNIP))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddPeerToStore adds a peer to the store.
|
||||
func (c *Core) AddPeerToStore(peer peer.Peer) error {
|
||||
tx, err := c.store.BeginTransaction()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer tx.Rollback()
|
||||
txdata := storewrapper.StoreWrapper{Store: tx}
|
||||
|
||||
if err := txdata.IncrementPeersResourceVersion(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := txdata.PutPeer(peer); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := tx.Commit(); err != nil {
|
||||
return err
|
||||
}
|
||||
c.zaplogger.Info("added peer to store", zap.String("peer public_ip", peer.PublicIP), zap.String("peer vpn_ip", peer.VPNIP))
|
||||
return nil
|
||||
}
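// Pattern note (illustrative, hypothetical helper): store mutations in this
// file follow the begin / defer-rollback / commit idiom seen above. Factored
// out, it could look like this; it assumes, as the existing code does, that
// rolling back after a successful commit is a no-op.
func (c *Core) withTransaction(fn func(storewrapper.StoreWrapper) error) error {
	tx, err := c.store.BeginTransaction()
	if err != nil {
		return err
	}
	defer tx.Rollback()
	if err := fn(storewrapper.StoreWrapper{Store: tx}); err != nil {
		return err
	}
	return tx.Commit()
}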
|
||||
|
||||
// UpdatePeers synchronizes the peers known to the store and the vpn with the passed peers.
|
||||
func (c *Core) UpdatePeers(peers []peer.Peer) error {
|
||||
return c.vpn.UpdatePeers(peers)
|
||||
}
|
@ -1,137 +0,0 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/edgelesssys/constellation/coordinator/peer"
|
||||
"github.com/edgelesssys/constellation/internal/deploy/user"
|
||||
"github.com/edgelesssys/constellation/internal/file"
|
||||
"github.com/spf13/afero"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/zap/zaptest"
|
||||
)
|
||||
|
||||
func TestGetPeers(t *testing.T) {
|
||||
peer1 := peer.Peer{PublicIP: "192.0.2.11", VPNIP: "192.0.2.21", VPNPubKey: []byte{1, 2, 3}}
|
||||
peer2 := peer.Peer{PublicIP: "192.0.2.12", VPNIP: "192.0.2.22", VPNPubKey: []byte{2, 3, 4}}
|
||||
|
||||
testCases := map[string]struct {
|
||||
storePeers []peer.Peer
|
||||
resourceVersion int
|
||||
wantPeers []peer.Peer
|
||||
}{
|
||||
"request version 0": { // store has version 2
|
||||
storePeers: []peer.Peer{peer1, peer2},
|
||||
resourceVersion: 0,
|
||||
wantPeers: []peer.Peer{peer1, peer2},
|
||||
},
|
||||
"request version 1": {
|
||||
storePeers: []peer.Peer{peer1, peer2},
|
||||
resourceVersion: 1,
|
||||
wantPeers: []peer.Peer{peer1, peer2},
|
||||
},
|
||||
"request version 2": {
|
||||
storePeers: []peer.Peer{peer1, peer2},
|
||||
resourceVersion: 2,
|
||||
wantPeers: nil,
|
||||
},
|
||||
"request version 3": {
|
||||
storePeers: []peer.Peer{peer1, peer2},
|
||||
resourceVersion: 3,
|
||||
wantPeers: []peer.Peer{peer1, peer2},
|
||||
},
|
||||
"request version 4": {
|
||||
storePeers: []peer.Peer{peer1, peer2},
|
||||
resourceVersion: 4,
|
||||
wantPeers: []peer.Peer{peer1, peer2},
|
||||
},
|
||||
}
|
||||
|
||||
for name, tc := range testCases {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
require := require.New(t)
|
||||
|
||||
fs := afero.NewMemMapFs()
|
||||
core, err := NewCore(&stubVPN{}, nil, nil, nil, zaptest.NewLogger(t), nil, nil, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
|
||||
require.NoError(err)
|
||||
|
||||
// prepare store
|
||||
for _, p := range tc.storePeers {
|
||||
require.NoError(core.data().PutPeer(p))
|
||||
}
|
||||
require.NoError(core.data().IncrementPeersResourceVersion())
|
||||
|
||||
resourceVersion, peers, err := core.GetPeers(tc.resourceVersion)
|
||||
require.NoError(err)
|
||||
|
||||
assert.Equal(2, resourceVersion)
|
||||
assert.ElementsMatch(tc.wantPeers, peers)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddPeer(t *testing.T) {
|
||||
someErr := errors.New("failed")
|
||||
testPeer := peer.Peer{
|
||||
PublicIP: "192.0.2.11",
|
||||
VPNIP: "192.0.2.21",
|
||||
VPNPubKey: []byte{2, 3, 4},
|
||||
}
|
||||
wantVPNPeers := []stubVPNPeer{{
|
||||
pubKey: testPeer.VPNPubKey,
|
||||
publicIP: "192.0.2.11",
|
||||
vpnIP: testPeer.VPNIP,
|
||||
}}
|
||||
|
||||
testCases := map[string]struct {
|
||||
peer peer.Peer
|
||||
vpn stubVPN
|
||||
wantErr bool
|
||||
wantVPNPeers []stubVPNPeer
|
||||
wantStorePeers []peer.Peer
|
||||
}{
|
||||
"add peer": {
|
||||
peer: testPeer,
|
||||
wantVPNPeers: wantVPNPeers,
|
||||
wantStorePeers: []peer.Peer{testPeer},
|
||||
},
|
||||
"don't add self to vpn": {
|
||||
peer: testPeer,
|
||||
vpn: stubVPN{interfaceIP: testPeer.VPNIP},
|
||||
wantStorePeers: []peer.Peer{testPeer},
|
||||
},
|
||||
"vpn add peer error": {
|
||||
peer: testPeer,
|
||||
vpn: stubVPN{addPeerErr: someErr},
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for name, tc := range testCases {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
require := require.New(t)
|
||||
|
||||
fs := afero.NewMemMapFs()
|
||||
core, err := NewCore(&tc.vpn, nil, nil, nil, zaptest.NewLogger(t), nil, nil, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
|
||||
require.NoError(err)
|
||||
|
||||
err = core.AddPeer(tc.peer)
|
||||
|
||||
if tc.wantErr {
|
||||
assert.Error(err)
|
||||
return
|
||||
}
|
||||
require.NoError(err)
|
||||
|
||||
assert.Equal(tc.wantVPNPeers, tc.vpn.peers)
|
||||
|
||||
actualStorePeers, err := core.data().GetPeers()
|
||||
require.NoError(err)
|
||||
assert.Equal(tc.wantStorePeers, actualStorePeers)
|
||||
})
|
||||
}
|
||||
}
|
@ -1,142 +0,0 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/edgelesssys/constellation/coordinator/peer"
|
||||
"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
const (
|
||||
callTimeout = 20 * time.Second
|
||||
retrieveInitialVPNPeersRetryBackoff = 60 * time.Second
|
||||
)
|
||||
|
||||
// ReinitializeAsCoordinator re-initializes a coordinator.
|
||||
func (c *Core) ReinitializeAsCoordinator(ctx context.Context, dialer Dialer, vpnIP string, api PubAPI, retryBackoff time.Duration) error {
|
||||
if err := c.SetVPNIP(vpnIP); err != nil {
|
||||
return fmt.Errorf("set vpn IP address: %v", err)
|
||||
}
|
||||
|
||||
// TODO: implement (manual) recovery endpoint in cases where no other coordinators are available
|
||||
// or when etcd quorum is lost (when leader election fails)
|
||||
|
||||
ownCoordinatorEndpoint := net.JoinHostPort(vpnIP, coordinatorPort)
|
||||
// try to find active coordinator to add as initial VPN peer
|
||||
// retry until coordinator is found
|
||||
var (
|
||||
initialVPNPeers []peer.Peer
|
||||
err error
|
||||
)
|
||||
for {
|
||||
initialVPNPeers, err = c.initialVPNPeersRetriever(ctx, dialer, c.zaplogger, c.metadata, &ownCoordinatorEndpoint)
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
time.Sleep(retryBackoff)
|
||||
}
|
||||
|
||||
// add initial peers to the VPN
|
||||
if err := c.UpdatePeers(initialVPNPeers); err != nil {
|
||||
return fmt.Errorf("adding initial peers to vpn: %v", err)
|
||||
}
|
||||
|
||||
// run the VPN-API server
|
||||
if err := api.StartVPNAPIServer(vpnIP); err != nil {
|
||||
return fmt.Errorf("start vpnAPIServer: %v", err)
|
||||
}
|
||||
|
||||
// ATTENTION: STORE HAS TO BE EMPTY (NO OVERLAPPING KEYS) WHEN THIS FUNCTION IS CALLED
|
||||
if err := c.SwitchToPersistentStore(); err != nil {
|
||||
return fmt.Errorf("switch to persistent store: %v", err)
|
||||
}
|
||||
c.zaplogger.Info("Transition to persistent store successful")
|
||||
|
||||
kmsData, err := c.GetKMSInfo()
|
||||
if err != nil {
|
||||
return fmt.Errorf("get kms info: %v", err)
|
||||
}
|
||||
if err := c.SetUpKMS(ctx, kmsData.StorageUri, kmsData.KmsUri, kmsData.KeyEncryptionKeyID, false); err != nil {
|
||||
return fmt.Errorf("setup kms: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReinitializeAsNode re-initializes a node.
|
||||
func (c *Core) ReinitializeAsNode(ctx context.Context, dialer Dialer, vpnIP string, api PubAPI, retryBackoff time.Duration) error {
|
||||
if err := c.SetVPNIP(vpnIP); err != nil {
|
||||
return fmt.Errorf("set vpn IP address: %v", err)
|
||||
}
|
||||
|
||||
// try to find active coordinator to add as initial VPN peer
|
||||
// retry until coordinator is found
|
||||
var (
|
||||
initialVPNPeers []peer.Peer
|
||||
err error
|
||||
)
|
||||
for {
|
||||
initialVPNPeers, err = c.initialVPNPeersRetriever(ctx, dialer, c.zaplogger, c.metadata, nil)
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
time.Sleep(retryBackoff)
|
||||
}
|
||||
|
||||
// add initial peers to the VPN
|
||||
if err := c.UpdatePeers(initialVPNPeers); err != nil {
|
||||
return fmt.Errorf("adding initial peers to vpn: %v", err)
|
||||
}
|
||||
|
||||
api.StartUpdateLoop()
|
||||
return nil
|
||||
}
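// Illustrative refactor sketch (hypothetical, not in the original code): both
// Reinitialize* functions share the same retry-until-success loop, which could
// be extracted like this.
func retryPeers(retrieve func() ([]peer.Peer, error), backoff time.Duration) []peer.Peer {
	for {
		peers, err := retrieve()
		if err == nil {
			return peers
		}
		time.Sleep(backoff)
	}
}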
|
||||
|
||||
func getInitialVPNPeers(ctx context.Context, dialer Dialer, logger *zap.Logger, metadata ProviderMetadata, ownCoordinatorEndpoint *string) ([]peer.Peer, error) {
|
||||
coordinatorEndpoints, err := CoordinatorEndpoints(ctx, metadata)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("get coordinator endpoints: %v", err)
|
||||
}
|
||||
	// shuffle endpoints using a PRNG. While the seed is not cryptographically secure,
|
||||
	// it is good enough for load balancing.
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
rand.Shuffle(len(coordinatorEndpoints), func(i, j int) {
|
||||
coordinatorEndpoints[i], coordinatorEndpoints[j] = coordinatorEndpoints[j], coordinatorEndpoints[i]
|
||||
})
|
||||
|
||||
// try to find active coordinator to retrieve peers
|
||||
for _, coordinatorEndpoint := range coordinatorEndpoints {
|
||||
if ownCoordinatorEndpoint != nil && coordinatorEndpoint == *ownCoordinatorEndpoint {
|
||||
continue
|
||||
}
|
||||
callCTX, cancel := context.WithTimeout(ctx, callTimeout)
|
||||
defer cancel()
|
||||
conn, err := dialer.Dial(callCTX, coordinatorEndpoint)
|
||||
if err != nil {
|
||||
logger.Warn("failed getting VPN peer information from coordinator: dialing failed: ", zap.String("endpoint", coordinatorEndpoint), zap.Error(err))
|
||||
continue
|
||||
}
|
||||
defer conn.Close()
|
||||
client := pubproto.NewAPIClient(conn)
|
||||
		resp, err := client.GetVPNPeers(callCTX, &pubproto.GetVPNPeersRequest{})
|
||||
if err != nil {
|
||||
logger.Warn("getting VPN peer information from coordinator failed: request failed: ", zap.String("endpoint", coordinatorEndpoint), zap.Error(err))
|
||||
continue
|
||||
}
|
||||
return peer.FromPubProto(resp.Peers), nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("no active coordinator found. tried %v", coordinatorEndpoints)
|
||||
}
|
||||
|
||||
// PubAPI is the interface for the public API of the coordinator.
|
||||
type PubAPI interface {
|
||||
StartVPNAPIServer(vpnIP string) error
|
||||
StartUpdateLoop()
|
||||
}
|
||||
|
||||
type initialVPNPeersRetriever func(ctx context.Context, dialer Dialer, logger *zap.Logger, metadata ProviderMetadata, ownCoordinatorEndpoint *string) ([]peer.Peer, error)
|
@ -1,286 +0,0 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/edgelesssys/constellation/coordinator/cloudprovider/cloudtypes"
|
||||
"github.com/edgelesssys/constellation/coordinator/peer"
|
||||
"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
|
||||
"github.com/edgelesssys/constellation/coordinator/role"
|
||||
"github.com/edgelesssys/constellation/internal/atls"
|
||||
"github.com/edgelesssys/constellation/internal/deploy/user"
|
||||
"github.com/edgelesssys/constellation/internal/file"
|
||||
"github.com/edgelesssys/constellation/internal/grpc/atlscredentials"
|
||||
"github.com/edgelesssys/constellation/internal/grpc/dialer"
|
||||
"github.com/edgelesssys/constellation/internal/grpc/testdialer"
|
||||
"github.com/edgelesssys/constellation/internal/oid"
|
||||
kms "github.com/edgelesssys/constellation/kms/setup"
|
||||
"github.com/spf13/afero"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/zap"
|
||||
"go.uber.org/zap/zaptest"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
func TestReinitializeAsNode(t *testing.T) {
|
||||
testPeers := []peer.Peer{
|
||||
{
|
||||
PublicIP: "192.0.2.1",
|
||||
VPNIP: "198.51.100.1",
|
||||
VPNPubKey: []byte{0x1, 0x2, 0x3},
|
||||
Role: role.Coordinator,
|
||||
},
|
||||
}
|
||||
wantedVPNPeers := []stubVPNPeer{
|
||||
{
|
||||
publicIP: "192.0.2.1",
|
||||
vpnIP: "198.51.100.1",
|
||||
pubKey: []byte{0x1, 0x2, 0x3},
|
||||
},
|
||||
}
|
||||
vpnIP := "198.51.100.2"
|
||||
|
||||
testCases := map[string]struct {
|
||||
getInitialVPNPeersResponses []struct {
|
||||
peers []peer.Peer
|
||||
err error
|
||||
}
|
||||
wantErr bool
|
||||
}{
|
||||
"reinitialize as node works": {
|
||||
getInitialVPNPeersResponses: []struct {
|
||||
peers []peer.Peer
|
||||
err error
|
||||
}{{peers: testPeers}},
|
||||
},
|
||||
"reinitialize as node will retry until vpn peers are retrieved": {
|
||||
getInitialVPNPeersResponses: []struct {
|
||||
peers []peer.Peer
|
||||
err error
|
||||
}{
|
||||
{err: errors.New("retrieving vpn peers failed")},
|
||||
{peers: testPeers},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for name, tc := range testCases {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
require := require.New(t)
|
||||
|
||||
coordinators := []cloudtypes.Instance{{PrivateIPs: []string{"192.0.2.1"}, Role: role.Coordinator}}
|
||||
netDialer := testdialer.NewBufconnDialer()
|
||||
dialer := dialer.New(nil, atls.NewFakeValidator(oid.Dummy{}), netDialer)
|
||||
server := newPubAPIServer()
|
||||
api := &pubAPIServerStub{responses: tc.getInitialVPNPeersResponses}
|
||||
pubproto.RegisterAPIServer(server, api)
|
||||
go server.Serve(netDialer.GetListener("192.0.2.1:9000"))
|
||||
defer server.Stop()
|
||||
vpn := &stubVPN{}
|
||||
fs := afero.NewMemMapFs()
|
||||
core, err := NewCore(vpn, nil, &stubMetadata{listRes: coordinators, supportedRes: true}, nil, zaptest.NewLogger(t), nil, nil, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
|
||||
require.NoError(err)
|
||||
err = core.ReinitializeAsNode(context.Background(), dialer, vpnIP, &stubPubAPI{}, 0)
|
||||
|
||||
if tc.wantErr {
|
||||
assert.Error(err)
|
||||
return
|
||||
}
|
||||
require.NoError(err)
|
||||
|
||||
assert.Equal(vpnIP, vpn.interfaceIP)
|
||||
assert.Equal(wantedVPNPeers, vpn.peers)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestReinitializeAsCoordinator(t *testing.T) {
|
||||
testPeers := []peer.Peer{
|
||||
{
|
||||
PublicIP: "192.0.2.1",
|
||||
VPNIP: "198.51.100.1",
|
||||
VPNPubKey: []byte{0x1, 0x2, 0x3},
|
||||
Role: role.Coordinator,
|
||||
},
|
||||
}
|
||||
wantedVPNPeers := []stubVPNPeer{
|
||||
{
|
||||
publicIP: "192.0.2.1",
|
||||
vpnIP: "198.51.100.1",
|
||||
pubKey: []byte{0x1, 0x2, 0x3},
|
||||
},
|
||||
}
|
||||
vpnIP := "198.51.100.2"
|
||||
|
||||
testCases := map[string]struct {
|
||||
getInitialVPNPeersResponses []struct {
|
||||
peers []peer.Peer
|
||||
err error
|
||||
}
|
||||
wantErr bool
|
||||
}{
|
||||
"reinitialize as coordinator works": {
|
||||
getInitialVPNPeersResponses: []struct {
|
||||
peers []peer.Peer
|
||||
err error
|
||||
}{{peers: testPeers}},
|
||||
},
|
||||
"reinitialize as coordinator will retry until vpn peers are retrieved": {
|
||||
getInitialVPNPeersResponses: []struct {
|
||||
peers []peer.Peer
|
||||
err error
|
||||
}{
|
||||
{err: errors.New("retrieving vpn peers failed")},
|
||||
{peers: testPeers},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for name, tc := range testCases {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
require := require.New(t)
|
||||
|
||||
coordinators := []cloudtypes.Instance{{PrivateIPs: []string{"192.0.2.1"}, Role: role.Coordinator}}
|
||||
netDialer := testdialer.NewBufconnDialer()
|
||||
dialer := dialer.New(nil, atls.NewFakeValidator(oid.Dummy{}), netDialer)
|
||||
server := newPubAPIServer()
|
||||
api := &pubAPIServerStub{responses: tc.getInitialVPNPeersResponses}
|
||||
pubproto.RegisterAPIServer(server, api)
|
||||
go server.Serve(netDialer.GetListener("192.0.2.1:9000"))
|
||||
defer server.Stop()
|
||||
vpn := &stubVPN{}
|
||||
fs := afero.NewMemMapFs()
|
||||
core, err := NewCore(vpn, nil, &stubMetadata{listRes: coordinators, supportedRes: true}, nil, zaptest.NewLogger(t), nil, &fakeStoreFactory{}, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
|
||||
require.NoError(err)
|
||||
// prepare store to emulate initialized KMS
|
||||
require.NoError(core.data().PutKMSData(kms.KMSInformation{StorageUri: kms.NoStoreURI, KmsUri: kms.ClusterKMSURI}))
|
||||
require.NoError(core.data().PutMasterSecret([]byte("master-secret")))
|
||||
err = core.ReinitializeAsCoordinator(context.Background(), dialer, vpnIP, &stubPubAPI{}, 0)
|
||||
|
||||
if tc.wantErr {
|
||||
assert.Error(err)
|
||||
return
|
||||
}
|
||||
require.NoError(err)
|
||||
|
||||
assert.Equal(vpnIP, vpn.interfaceIP)
|
||||
assert.Equal(wantedVPNPeers, vpn.peers)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetInitialVPNPeers(t *testing.T) {
|
||||
testPeers := []peer.Peer{
|
||||
{
|
||||
PublicIP: "192.0.2.1",
|
||||
VPNIP: "198.51.100.1",
|
||||
VPNPubKey: []byte{0x1, 0x2, 0x3},
|
||||
Role: role.Coordinator,
|
||||
},
|
||||
}
|
||||
|
||||
testCases := map[string]struct {
|
||||
ownCoordinatorEndpoint *string
|
||||
coordinatorIPs []string
|
||||
metadataErr error
|
||||
peers []peer.Peer
|
||||
getVPNPeersErr error
|
||||
wantErr bool
|
||||
}{
|
||||
"getInitialVPNPeers works from worker node": {
|
||||
coordinatorIPs: []string{"192.0.2.1"},
|
||||
peers: testPeers,
|
||||
},
|
||||
"getInitialVPNPeers works from coordinator": {
|
||||
ownCoordinatorEndpoint: proto.String("192.0.2.2:9000"),
|
||||
coordinatorIPs: []string{"192.0.2.1", "192.0.2.2"},
|
||||
peers: testPeers,
|
||||
},
|
||||
"getInitialVPNPeers filters itself": {
|
||||
ownCoordinatorEndpoint: proto.String("192.0.2.2:9000"),
|
||||
coordinatorIPs: []string{"192.0.2.2"},
|
||||
wantErr: true,
|
||||
},
|
||||
"getInitialVPNPeers fails if no coordinators are found": {
|
||||
wantErr: true,
|
||||
},
|
||||
"getInitialVPNPeers fails if metadata API fails to retrieve coordinators": {
|
||||
metadataErr: errors.New("metadata error"),
|
||||
wantErr: true,
|
||||
},
|
||||
"getInitialVPNPeers fails if rpc call fails": {
|
||||
coordinatorIPs: []string{"192.0.2.1"},
|
||||
getVPNPeersErr: errors.New("rpc error"),
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for name, tc := range testCases {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
require := require.New(t)
|
||||
|
||||
coordinators := func(ips []string) []cloudtypes.Instance {
|
||||
instances := []cloudtypes.Instance{}
|
||||
for _, ip := range ips {
|
||||
instances = append(instances, cloudtypes.Instance{PrivateIPs: []string{ip}, Role: role.Coordinator})
|
||||
}
|
||||
return instances
|
||||
}(tc.coordinatorIPs)
|
||||
zapLogger, err := zap.NewDevelopment()
|
||||
require.NoError(err)
|
||||
netDialer := testdialer.NewBufconnDialer()
|
||||
dialer := dialer.New(nil, atls.NewFakeValidator(oid.Dummy{}), netDialer)
|
||||
server := newPubAPIServer()
|
||||
api := &pubAPIServerStub{
|
||||
responses: []struct {
|
||||
peers []peer.Peer
|
||||
err error
|
||||
}{{peers: tc.peers, err: tc.getVPNPeersErr}},
|
||||
}
|
||||
pubproto.RegisterAPIServer(server, api)
|
||||
go server.Serve(netDialer.GetListener("192.0.2.1:9000"))
|
||||
defer server.Stop()
|
||||
peers, err := getInitialVPNPeers(context.Background(), dialer, zapLogger, &stubMetadata{listRes: coordinators, listErr: tc.metadataErr, supportedRes: true}, tc.ownCoordinatorEndpoint)
|
||||
|
||||
if tc.wantErr {
|
||||
assert.Error(err)
|
||||
return
|
||||
}
|
||||
require.NoError(err)
|
||||
assert.Equal(tc.peers, peers)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func newPubAPIServer() *grpc.Server {
|
||||
creds := atlscredentials.New(atls.NewFakeIssuer(oid.Dummy{}), nil)
|
||||
|
||||
return grpc.NewServer(grpc.Creds(creds))
|
||||
}
|
||||
|
||||
type pubAPIServerStub struct {
|
||||
responses []struct {
|
||||
peers []peer.Peer
|
||||
err error
|
||||
}
|
||||
i int
|
||||
pubproto.UnimplementedAPIServer
|
||||
}
|
||||
|
||||
func (s *pubAPIServerStub) GetVPNPeers(ctx context.Context, in *pubproto.GetVPNPeersRequest) (*pubproto.GetVPNPeersResponse, error) {
|
||||
if len(s.responses) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
resp := s.responses[s.i]
|
||||
s.i = (s.i + 1) % len(s.responses)
|
||||
return &pubproto.GetVPNPeersResponse{
|
||||
Peers: peer.ToPubProto(resp.peers),
|
||||
}, resp.err
|
||||
}
|
@ -1,23 +0,0 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/edgelesssys/constellation/internal/deploy/ssh"
|
||||
"github.com/edgelesssys/constellation/internal/logger"
|
||||
"go.uber.org/zap/zapcore"
|
||||
)
|
||||
|
||||
// CreateSSHUsers creates UNIX users with the corresponding SSH access on the system the Coordinator is running on, if defined in the config.
|
||||
func (c *Core) CreateSSHUsers(sshUserKeys []ssh.UserKey) error {
|
||||
sshAccess := ssh.NewAccess(logger.New(logger.JSONLog, zapcore.InfoLevel), c.linuxUserManager)
|
||||
ctx := context.Background()
|
||||
|
||||
for _, pair := range sshUserKeys {
|
||||
if err := sshAccess.DeployAuthorizedKey(ctx, pair); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
@ -1,30 +0,0 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"github.com/edgelesssys/constellation/coordinator/state"
|
||||
"github.com/edgelesssys/constellation/internal/attestation/vtpm"
|
||||
)
|
||||
|
||||
// GetState returns the current state.
|
||||
func (c *Core) GetState() state.State {
|
||||
return c.state.Get()
|
||||
}
|
||||
|
||||
// RequireState checks if the peer is in one of the desired states and returns an error otherwise.
|
||||
func (c *Core) RequireState(states ...state.State) error {
|
||||
return c.state.Require(states...)
|
||||
}
|
||||
|
||||
// AdvanceState advances the state. It also marks the peer as initialized for the corresponding state transition.
|
||||
func (c *Core) AdvanceState(newState state.State, ownerID, clusterID []byte) error {
|
||||
if newState != state.Failed && c.state.Get() == state.AcceptingInit {
|
||||
if err := c.data().PutClusterID(clusterID); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := vtpm.MarkNodeAsInitialized(c.openTPM, ownerID, clusterID); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
c.state.Advance(newState)
|
||||
return nil
|
||||
}
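// Illustrative usage (mirrors the tests for this file; the function name is
// hypothetical): a freshly created Core starts in state.Uninitialized, and only
// the transition out of AcceptingInit touches the store and the vTPM.
func exampleAdvance(core *Core, ownerID, clusterID []byte) error {
	// Uninitialized -> AcceptingInit: no TPM interaction yet.
	if err := core.AdvanceState(state.AcceptingInit, nil, nil); err != nil {
		return err
	}
	// AcceptingInit -> ActivatingNodes: persists the cluster ID and marks the
	// node as initialized in the vTPM before advancing.
	return core.AdvanceState(state.ActivatingNodes, ownerID, clusterID)
}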
|
@ -1,88 +0,0 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
"github.com/edgelesssys/constellation/coordinator/state"
|
||||
"github.com/edgelesssys/constellation/internal/attestation/simulator"
|
||||
"github.com/edgelesssys/constellation/internal/deploy/user"
|
||||
"github.com/edgelesssys/constellation/internal/file"
|
||||
"github.com/spf13/afero"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/zap/zaptest"
|
||||
)
|
||||
|
||||
func TestAdvanceState(t *testing.T) {
|
||||
someErr := errors.New("failed")
|
||||
|
||||
testCases := map[string]struct {
|
||||
initialState state.State
|
||||
newState state.State
|
||||
openTPMErr error
|
||||
wantErr bool
|
||||
wantOpenTPMCalled bool
|
||||
}{
|
||||
"init -> coordinator": {
|
||||
initialState: state.AcceptingInit,
|
||||
newState: state.ActivatingNodes,
|
||||
wantOpenTPMCalled: true,
|
||||
},
|
||||
"init -> node": {
|
||||
initialState: state.AcceptingInit,
|
||||
newState: state.IsNode,
|
||||
wantOpenTPMCalled: true,
|
||||
},
|
||||
"init -> failed": {
|
||||
initialState: state.AcceptingInit,
|
||||
newState: state.Failed,
|
||||
},
|
||||
"uninit -> init": {
|
||||
initialState: state.Uninitialized,
|
||||
newState: state.AcceptingInit,
|
||||
},
|
||||
"openTPM error": {
|
||||
initialState: state.AcceptingInit,
|
||||
newState: state.ActivatingNodes,
|
||||
openTPMErr: someErr,
|
||||
wantErr: true,
|
||||
wantOpenTPMCalled: true,
|
||||
},
|
||||
}
|
||||
|
||||
for name, tc := range testCases {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
require := require.New(t)
|
||||
|
||||
openTPMCalled := false
|
||||
openTPM := func() (io.ReadWriteCloser, error) {
|
||||
openTPMCalled = true
|
||||
if tc.openTPMErr != nil {
|
||||
return nil, tc.openTPMErr
|
||||
}
|
||||
return simulator.OpenSimulatedTPM()
|
||||
}
|
||||
|
||||
fs := afero.NewMemMapFs()
|
||||
core, err := NewCore(&stubVPN{}, nil, nil, nil, zaptest.NewLogger(t), openTPM, nil, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
|
||||
require.NoError(err)
|
||||
assert.Equal(state.Uninitialized, core.GetState())
|
||||
core.state = tc.initialState
|
||||
|
||||
err = core.AdvanceState(tc.newState, []byte("secret"), []byte("cluster"))
|
||||
assert.Equal(tc.wantOpenTPMCalled, openTPMCalled)
|
||||
|
||||
if tc.wantErr {
|
||||
assert.Error(err)
|
||||
assert.Equal(tc.initialState, core.GetState())
|
||||
return
|
||||
}
|
||||
require.NoError(err)
|
||||
|
||||
assert.Equal(tc.newState, core.GetState())
|
||||
})
|
||||
}
|
||||
}
|
@ -1,80 +0,0 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
"github.com/edgelesssys/constellation/coordinator/peer"
|
||||
)
|
||||
|
||||
type VPN interface {
|
||||
Setup(privKey []byte) error
|
||||
GetPrivateKey() ([]byte, error)
|
||||
GetPublicKey() ([]byte, error)
|
||||
GetInterfaceIP() (string, error)
|
||||
SetInterfaceIP(ip string) error
|
||||
AddPeer(pubKey []byte, publicIP string, vpnIP string) error
|
||||
RemovePeer(pubKey []byte) error
|
||||
UpdatePeers(peers []peer.Peer) error
|
||||
}
|
||||
|
||||
type stubVPN struct {
|
||||
peers []stubVPNPeer
|
||||
interfaceIP string
|
||||
privateKey []byte
|
||||
addPeerErr error
|
||||
removePeerErr error
|
||||
getInterfaceIPErr error
|
||||
getPrivateKeyErr error
|
||||
}
|
||||
|
||||
func (*stubVPN) Setup(privKey []byte) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (v *stubVPN) GetPrivateKey() ([]byte, error) {
|
||||
return v.privateKey, v.getPrivateKeyErr
|
||||
}
|
||||
|
||||
func (*stubVPN) GetPublicKey() ([]byte, error) {
|
||||
return []byte{3, 4, 5}, nil
|
||||
}
|
||||
|
||||
func (v *stubVPN) GetInterfaceIP() (string, error) {
|
||||
return v.interfaceIP, v.getInterfaceIPErr
|
||||
}
|
||||
|
||||
func (v *stubVPN) SetInterfaceIP(ip string) error {
|
||||
v.interfaceIP = ip
|
||||
return nil
|
||||
}
|
||||
|
||||
func (v *stubVPN) AddPeer(pubKey []byte, publicIP string, vpnIP string) error {
|
||||
v.peers = append(v.peers, stubVPNPeer{pubKey, publicIP, vpnIP})
|
||||
return v.addPeerErr
|
||||
}
|
||||
|
||||
func (v *stubVPN) RemovePeer(pubKey []byte) error {
|
||||
newPeerList := make([]stubVPNPeer, 0, len(v.peers))
|
||||
	for _, p := range v.peers {
|
||||
		if !bytes.Equal(p.pubKey, pubKey) {
|
||||
			newPeerList = append(newPeerList, p)
|
||||
}
|
||||
}
|
||||
v.peers = newPeerList
|
||||
return v.removePeerErr
|
||||
}
|
||||
|
||||
func (v *stubVPN) UpdatePeers(peers []peer.Peer) error {
|
||||
for _, peer := range peers {
|
||||
if err := v.AddPeer(peer.VPNPubKey, peer.PublicIP, peer.VPNIP); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type stubVPNPeer struct {
|
||||
pubKey []byte
|
||||
publicIP string
|
||||
vpnIP string
|
||||
}
|
394
coordinator/initproto/init.pb.go
Normal file
@ -0,0 +1,394 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.28.0
|
||||
// protoc v3.20.1
|
||||
// source: init.proto
|
||||
|
||||
package initproto
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type InitRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
AutoscalingNodeGroups []string `protobuf:"bytes,1,rep,name=autoscaling_node_groups,json=autoscalingNodeGroups,proto3" json:"autoscaling_node_groups,omitempty"`
|
||||
MasterSecret []byte `protobuf:"bytes,2,opt,name=master_secret,json=masterSecret,proto3" json:"master_secret,omitempty"`
|
||||
KmsUri string `protobuf:"bytes,3,opt,name=kms_uri,json=kmsUri,proto3" json:"kms_uri,omitempty"`
|
||||
StorageUri string `protobuf:"bytes,4,opt,name=storage_uri,json=storageUri,proto3" json:"storage_uri,omitempty"`
|
||||
KeyEncryptionKeyId string `protobuf:"bytes,5,opt,name=key_encryption_key_id,json=keyEncryptionKeyId,proto3" json:"key_encryption_key_id,omitempty"`
|
||||
UseExistingKek bool `protobuf:"varint,6,opt,name=use_existing_kek,json=useExistingKek,proto3" json:"use_existing_kek,omitempty"`
|
||||
CloudServiceAccountUri string `protobuf:"bytes,7,opt,name=cloud_service_account_uri,json=cloudServiceAccountUri,proto3" json:"cloud_service_account_uri,omitempty"`
|
||||
KubernetesVersion string `protobuf:"bytes,8,opt,name=kubernetes_version,json=kubernetesVersion,proto3" json:"kubernetes_version,omitempty"`
|
||||
SshUserKeys []*SSHUserKey `protobuf:"bytes,9,rep,name=ssh_user_keys,json=sshUserKeys,proto3" json:"ssh_user_keys,omitempty"`
|
||||
}
|
||||
|
||||
func (x *InitRequest) Reset() {
|
||||
*x = InitRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_init_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *InitRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*InitRequest) ProtoMessage() {}
|
||||
|
||||
func (x *InitRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_init_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use InitRequest.ProtoReflect.Descriptor instead.
|
||||
func (*InitRequest) Descriptor() ([]byte, []int) {
|
||||
return file_init_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *InitRequest) GetAutoscalingNodeGroups() []string {
|
||||
if x != nil {
|
||||
return x.AutoscalingNodeGroups
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *InitRequest) GetMasterSecret() []byte {
|
||||
if x != nil {
|
||||
return x.MasterSecret
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *InitRequest) GetKmsUri() string {
|
||||
if x != nil {
|
||||
return x.KmsUri
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *InitRequest) GetStorageUri() string {
|
||||
if x != nil {
|
||||
return x.StorageUri
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *InitRequest) GetKeyEncryptionKeyId() string {
|
||||
if x != nil {
|
||||
return x.KeyEncryptionKeyId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *InitRequest) GetUseExistingKek() bool {
|
||||
if x != nil {
|
||||
return x.UseExistingKek
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (x *InitRequest) GetCloudServiceAccountUri() string {
|
||||
if x != nil {
|
||||
return x.CloudServiceAccountUri
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *InitRequest) GetKubernetesVersion() string {
|
||||
if x != nil {
|
||||
return x.KubernetesVersion
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *InitRequest) GetSshUserKeys() []*SSHUserKey {
|
||||
if x != nil {
|
||||
return x.SshUserKeys
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type InitResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Kubeconfig []byte `protobuf:"bytes,1,opt,name=kubeconfig,proto3" json:"kubeconfig,omitempty"`
|
||||
OwnerId []byte `protobuf:"bytes,2,opt,name=owner_id,json=ownerId,proto3" json:"owner_id,omitempty"`
|
||||
ClusterId []byte `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"`
|
||||
}
|
||||
|
||||
func (x *InitResponse) Reset() {
|
||||
*x = InitResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_init_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *InitResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*InitResponse) ProtoMessage() {}
|
||||
|
||||
func (x *InitResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_init_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use InitResponse.ProtoReflect.Descriptor instead.
|
||||
func (*InitResponse) Descriptor() ([]byte, []int) {
|
||||
return file_init_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *InitResponse) GetKubeconfig() []byte {
|
||||
if x != nil {
|
||||
return x.Kubeconfig
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *InitResponse) GetOwnerId() []byte {
|
||||
if x != nil {
|
||||
return x.OwnerId
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *InitResponse) GetClusterId() []byte {
|
||||
if x != nil {
|
||||
return x.ClusterId
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type SSHUserKey struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Username string `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"`
|
||||
PublicKey string `protobuf:"bytes,2,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"`
|
||||
}
|
||||
|
||||
func (x *SSHUserKey) Reset() {
|
||||
*x = SSHUserKey{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_init_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *SSHUserKey) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*SSHUserKey) ProtoMessage() {}
|
||||
|
||||
func (x *SSHUserKey) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_init_proto_msgTypes[2]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use SSHUserKey.ProtoReflect.Descriptor instead.
|
||||
func (*SSHUserKey) Descriptor() ([]byte, []int) {
|
||||
return file_init_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (x *SSHUserKey) GetUsername() string {
|
||||
if x != nil {
|
||||
return x.Username
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *SSHUserKey) GetPublicKey() string {
|
||||
if x != nil {
|
||||
return x.PublicKey
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
var File_init_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_init_proto_rawDesc = []byte{
|
||||
0x0a, 0x0a, 0x69, 0x6e, 0x69, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x04, 0x69, 0x6e,
|
||||
0x69, 0x74, 0x22, 0xa1, 0x03, 0x0a, 0x0b, 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65,
|
||||
0x73, 0x74, 0x12, 0x36, 0x0a, 0x17, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e,
|
||||
0x67, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x18, 0x01, 0x20,
|
||||
0x03, 0x28, 0x09, 0x52, 0x15, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67,
|
||||
0x4e, 0x6f, 0x64, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x6d, 0x61,
|
||||
0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28,
|
||||
0x0c, 0x52, 0x0c, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12,
|
||||
0x17, 0x0a, 0x07, 0x6b, 0x6d, 0x73, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
|
||||
0x52, 0x06, 0x6b, 0x6d, 0x73, 0x55, 0x72, 0x69, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x6f, 0x72,
|
||||
0x61, 0x67, 0x65, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73,
|
||||
0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x55, 0x72, 0x69, 0x12, 0x31, 0x0a, 0x15, 0x6b, 0x65, 0x79,
|
||||
0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f,
|
||||
0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6b, 0x65, 0x79, 0x45, 0x6e, 0x63,
|
||||
0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x49, 0x64, 0x12, 0x28, 0x0a, 0x10,
|
||||
0x75, 0x73, 0x65, 0x5f, 0x65, 0x78, 0x69, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x6b, 0x65, 0x6b,
|
||||
0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x75, 0x73, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74,
|
||||
0x69, 0x6e, 0x67, 0x4b, 0x65, 0x6b, 0x12, 0x39, 0x0a, 0x19, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x5f,
|
||||
0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f,
|
||||
0x75, 0x72, 0x69, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x63, 0x6c, 0x6f, 0x75, 0x64,
|
||||
0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x55, 0x72,
|
||||
0x69, 0x12, 0x2d, 0x0a, 0x12, 0x6b, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x65, 0x73, 0x5f,
|
||||
0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x6b,
|
||||
0x75, 0x62, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x65, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
|
||||
0x12, 0x34, 0x0a, 0x0d, 0x73, 0x73, 0x68, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x6b, 0x65, 0x79,
|
||||
0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x69, 0x6e, 0x69, 0x74, 0x2e, 0x53,
|
||||
0x53, 0x48, 0x55, 0x73, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x52, 0x0b, 0x73, 0x73, 0x68, 0x55, 0x73,
|
||||
0x65, 0x72, 0x4b, 0x65, 0x79, 0x73, 0x22, 0x68, 0x0a, 0x0c, 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65,
|
||||
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x6b, 0x75, 0x62, 0x65, 0x63, 0x6f,
|
||||
0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x6b, 0x75, 0x62, 0x65,
|
||||
0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f,
|
||||
0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x49,
|
||||
0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18,
|
||||
0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64,
|
||||
0x22, 0x47, 0x0a, 0x0a, 0x53, 0x53, 0x48, 0x55, 0x73, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x12, 0x1a,
|
||||
0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
|
||||
0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x75,
|
||||
0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09,
|
||||
0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x32, 0x34, 0x0a, 0x03, 0x41, 0x50, 0x49,
|
||||
0x12, 0x2d, 0x0a, 0x04, 0x49, 0x6e, 0x69, 0x74, 0x12, 0x11, 0x2e, 0x69, 0x6e, 0x69, 0x74, 0x2e,
|
||||
0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x69, 0x6e,
|
||||
0x69, 0x74, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42,
|
||||
0x3c, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x64,
|
||||
0x67, 0x65, 0x6c, 0x65, 0x73, 0x73, 0x73, 0x79, 0x73, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x65,
|
||||
0x6c, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61,
|
||||
0x74, 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x69, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70,
|
||||
0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_init_proto_rawDescOnce sync.Once
|
||||
file_init_proto_rawDescData = file_init_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_init_proto_rawDescGZIP() []byte {
|
||||
file_init_proto_rawDescOnce.Do(func() {
|
||||
file_init_proto_rawDescData = protoimpl.X.CompressGZIP(file_init_proto_rawDescData)
|
||||
})
|
||||
return file_init_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_init_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
|
||||
var file_init_proto_goTypes = []interface{}{
|
||||
(*InitRequest)(nil), // 0: init.InitRequest
|
||||
(*InitResponse)(nil), // 1: init.InitResponse
|
||||
(*SSHUserKey)(nil), // 2: init.SSHUserKey
|
||||
}
|
||||
var file_init_proto_depIdxs = []int32{
|
||||
2, // 0: init.InitRequest.ssh_user_keys:type_name -> init.SSHUserKey
|
||||
0, // 1: init.API.Init:input_type -> init.InitRequest
|
||||
1, // 2: init.API.Init:output_type -> init.InitResponse
|
||||
2, // [2:3] is the sub-list for method output_type
|
||||
1, // [1:2] is the sub-list for method input_type
|
||||
1, // [1:1] is the sub-list for extension type_name
|
||||
1, // [1:1] is the sub-list for extension extendee
|
||||
0, // [0:1] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_init_proto_init() }
|
||||
func file_init_proto_init() {
|
||||
if File_init_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_init_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*InitRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_init_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*InitResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_init_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*SSHUserKey); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_init_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 3,
|
||||
NumExtensions: 0,
|
||||
NumServices: 1,
|
||||
},
|
||||
GoTypes: file_init_proto_goTypes,
|
||||
DependencyIndexes: file_init_proto_depIdxs,
|
||||
MessageInfos: file_init_proto_msgTypes,
|
||||
}.Build()
|
||||
File_init_proto = out.File
|
||||
file_init_proto_rawDesc = nil
|
||||
file_init_proto_goTypes = nil
|
||||
file_init_proto_depIdxs = nil
|
||||
}
|
32
coordinator/initproto/init.proto
Normal file
@ -0,0 +1,32 @@
|
||||
syntax = "proto3";
|
||||
|
||||
package init;
|
||||
|
||||
option go_package = "github.com/edgelesssys/constellation/coordinator/initproto";
|
||||
|
||||
service API {
|
||||
rpc Init(InitRequest) returns (InitResponse);
|
||||
}
|
||||
|
||||
message InitRequest {
|
||||
repeated string autoscaling_node_groups = 1;
|
||||
bytes master_secret = 2;
|
||||
string kms_uri = 3;
|
||||
string storage_uri = 4;
|
||||
string key_encryption_key_id = 5;
|
||||
bool use_existing_kek = 6;
|
||||
string cloud_service_account_uri = 7;
|
||||
string kubernetes_version = 8;
|
||||
repeated SSHUserKey ssh_user_keys = 9;
|
||||
}
|
||||
|
||||
message InitResponse {
|
||||
bytes kubeconfig = 1;
|
||||
bytes owner_id = 2;
|
||||
bytes cluster_id = 3;
|
||||
}
|
||||
|
||||
message SSHUserKey {
|
||||
string username = 1;
|
||||
string public_key = 2;
|
||||
}
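// Illustrative Go client sketch for the new init API (assumes an established
// *grpc.ClientConn built elsewhere; error handling shortened):
//
//	client := initproto.NewAPIClient(conn)
//	resp, err := client.Init(ctx, &initproto.InitRequest{
//		MasterSecret:          masterSecret,
//		AutoscalingNodeGroups: nodeGroups,
//	})
//	if err != nil { /* handle error */ }
//	kubeconfig := resp.Kubeconfig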
|
105
coordinator/initproto/init_grpc.pb.go
Normal file
@ -0,0 +1,105 @@
|
||||
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
|
||||
// versions:
|
||||
// - protoc-gen-go-grpc v1.2.0
|
||||
// - protoc v3.20.1
|
||||
// source: init.proto
|
||||
|
||||
package initproto
|
||||
|
||||
import (
|
||||
context "context"
|
||||
grpc "google.golang.org/grpc"
|
||||
codes "google.golang.org/grpc/codes"
|
||||
status "google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
// Requires gRPC-Go v1.32.0 or later.
|
||||
const _ = grpc.SupportPackageIsVersion7
|
||||
|
||||
// APIClient is the client API for API service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
|
||||
type APIClient interface {
|
||||
Init(ctx context.Context, in *InitRequest, opts ...grpc.CallOption) (*InitResponse, error)
|
||||
}
|
||||
|
||||
type aPIClient struct {
|
||||
cc grpc.ClientConnInterface
|
||||
}
|
||||
|
||||
func NewAPIClient(cc grpc.ClientConnInterface) APIClient {
|
||||
return &aPIClient{cc}
|
||||
}
|
||||
|
||||
func (c *aPIClient) Init(ctx context.Context, in *InitRequest, opts ...grpc.CallOption) (*InitResponse, error) {
|
||||
out := new(InitResponse)
|
||||
err := c.cc.Invoke(ctx, "/init.API/Init", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// APIServer is the server API for API service.
|
||||
// All implementations must embed UnimplementedAPIServer
|
||||
// for forward compatibility
|
||||
type APIServer interface {
|
||||
Init(context.Context, *InitRequest) (*InitResponse, error)
|
||||
mustEmbedUnimplementedAPIServer()
|
||||
}
|
||||
|
||||
// UnimplementedAPIServer must be embedded to have forward compatible implementations.
|
||||
type UnimplementedAPIServer struct {
|
||||
}
|
||||
|
||||
func (UnimplementedAPIServer) Init(context.Context, *InitRequest) (*InitResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method Init not implemented")
|
||||
}
|
||||
func (UnimplementedAPIServer) mustEmbedUnimplementedAPIServer() {}
|
||||
|
||||
// UnsafeAPIServer may be embedded to opt out of forward compatibility for this service.
|
||||
// Use of this interface is not recommended, as added methods to APIServer will
|
||||
// result in compilation errors.
|
||||
type UnsafeAPIServer interface {
|
||||
mustEmbedUnimplementedAPIServer()
|
||||
}
|
||||
|
||||
func RegisterAPIServer(s grpc.ServiceRegistrar, srv APIServer) {
|
||||
s.RegisterService(&API_ServiceDesc, srv)
|
||||
}
|
||||
|
||||
func _API_Init_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(InitRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(APIServer).Init(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/init.API/Init",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(APIServer).Init(ctx, req.(*InitRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
// API_ServiceDesc is the grpc.ServiceDesc for API service.
|
||||
// It's only intended for direct use with grpc.RegisterService,
|
||||
// and not to be introspected or modified (even as a copy)
|
||||
var API_ServiceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "init.API",
|
||||
HandlerType: (*APIServer)(nil),
|
||||
Methods: []grpc.MethodDesc{
|
||||
{
|
||||
MethodName: "Init",
|
||||
Handler: _API_Init_Handler,
|
||||
},
|
||||
},
|
||||
Streams: []grpc.StreamDesc{},
|
||||
Metadata: "init.proto",
|
||||
}
|
192
coordinator/internal/initserver/initserver.go
Normal file
@ -0,0 +1,192 @@
|
||||
package initserver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/edgelesssys/constellation/coordinator/config"
|
||||
"github.com/edgelesssys/constellation/coordinator/diskencryption"
|
||||
"github.com/edgelesssys/constellation/coordinator/initproto"
|
||||
"github.com/edgelesssys/constellation/coordinator/kubernetes"
|
||||
"github.com/edgelesssys/constellation/coordinator/nodestate"
|
||||
"github.com/edgelesssys/constellation/coordinator/role"
|
||||
"github.com/edgelesssys/constellation/coordinator/util"
|
||||
attestationtypes "github.com/edgelesssys/constellation/internal/attestation/types"
|
||||
"github.com/edgelesssys/constellation/internal/file"
|
||||
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
|
||||
grpc_zap "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap"
|
||||
grpc_ctxtags "github.com/grpc-ecosystem/go-grpc-middleware/tags"
|
||||
"go.uber.org/zap"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
type Server struct {
|
||||
nodeLock *sync.Mutex
|
||||
|
||||
kube ClusterInitializer
|
||||
disk EncryptedDisk
|
||||
fileHandler file.Handler
|
||||
grpcServer *grpc.Server
|
||||
|
||||
logger *zap.Logger
|
||||
|
||||
initproto.UnimplementedAPIServer
|
||||
}
|
||||
|
||||
func New(nodeLock *sync.Mutex, kube ClusterInitializer, logger *zap.Logger) *Server {
|
||||
logger = logger.Named("initServer")
|
||||
server := &Server{
|
||||
nodeLock: nodeLock,
|
||||
disk: diskencryption.New(),
|
||||
kube: kube,
|
||||
logger: logger,
|
||||
}
|
||||
|
||||
grpcLogger := logger.Named("gRPC")
|
||||
grpcServer := grpc.NewServer(
|
||||
grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(
|
||||
grpc_ctxtags.StreamServerInterceptor(),
|
||||
grpc_zap.StreamServerInterceptor(grpcLogger),
|
||||
)),
|
||||
grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
|
||||
grpc_ctxtags.UnaryServerInterceptor(),
|
||||
grpc_zap.UnaryServerInterceptor(grpcLogger),
|
||||
)),
|
||||
)
|
||||
initproto.RegisterAPIServer(grpcServer, server)
|
||||
|
||||
server.grpcServer = grpcServer
|
||||
|
||||
return server
|
||||
}
|
||||
|
||||
func (s *Server) Serve(ip, port string) error {
|
||||
lis, err := net.Listen("tcp", net.JoinHostPort(ip, port))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to listen: %w", err)
|
||||
}
|
||||
|
||||
return s.grpcServer.Serve(lis)
|
||||
}
|
||||
|
||||
func (s *Server) Init(ctx context.Context, req *initproto.InitRequest) (*initproto.InitResponse, error) {
|
||||
if ok := s.nodeLock.TryLock(); !ok {
|
||||
go s.grpcServer.GracefulStop()
|
||||
return nil, status.Error(codes.FailedPrecondition, "node is already being activated")
|
||||
}
|
||||
|
||||
id, err := s.deriveAttestationID(req.MasterSecret)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "%s", err)
|
||||
}
|
||||
|
||||
if err := s.setupDisk(req.MasterSecret); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "setting up disk: %s", err)
|
||||
}
|
||||
|
||||
state := nodestate.NodeState{
|
||||
Role: role.Coordinator,
|
||||
OwnerID: id.Owner,
|
||||
ClusterID: id.Cluster,
|
||||
}
|
||||
if err := state.ToFile(s.fileHandler); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "persisting node state: %s", err)
|
||||
}
|
||||
|
||||
kubeconfig, err := s.kube.InitCluster(ctx,
|
||||
req.AutoscalingNodeGroups,
|
||||
req.CloudServiceAccountUri,
|
||||
req.KubernetesVersion,
|
||||
id,
|
||||
kubernetes.KMSConfig{
|
||||
MasterSecret: req.MasterSecret,
|
||||
KMSURI: req.KmsUri,
|
||||
StorageURI: req.StorageUri,
|
||||
KeyEncryptionKeyID: req.KeyEncryptionKeyId,
|
||||
UseExistingKEK: req.UseExistingKek,
|
||||
},
|
||||
sshProtoKeysToMap(req.SshUserKeys),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "initializing cluster: %s", err)
|
||||
}
|
||||
|
||||
return &initproto.InitResponse{
|
||||
Kubeconfig: kubeconfig,
|
||||
OwnerId: id.Owner,
|
||||
ClusterId: id.Cluster,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *Server) setupDisk(masterSecret []byte) error {
|
||||
if err := s.disk.Open(); err != nil {
|
||||
return fmt.Errorf("opening encrypted disk: %w", err)
|
||||
}
|
||||
defer s.disk.Close()
|
||||
|
||||
uuid, err := s.disk.UUID()
|
||||
if err != nil {
|
||||
return fmt.Errorf("retrieving uuid of disk: %w", err)
|
||||
}
|
||||
uuid = strings.ToLower(uuid)
|
||||
|
||||
// TODO: Choose a way to salt the key derivation
|
||||
diskKey, err := util.DeriveKey(masterSecret, []byte("Constellation"), []byte("key"+uuid), 32)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return s.disk.UpdatePassphrase(string(diskKey))
|
||||
}
|
||||
|
||||
func (s *Server) deriveAttestationID(masterSecret []byte) (attestationtypes.ID, error) {
|
||||
clusterID, err := util.GenerateRandomBytes(config.RNGLengthDefault)
|
||||
if err != nil {
|
||||
return attestationtypes.ID{}, err
|
||||
}
|
||||
|
||||
// TODO: Choose a way to salt the key derivation
|
||||
ownerID, err := util.DeriveKey(masterSecret, []byte("Constellation"), []byte("id"), config.RNGLengthDefault)
|
||||
if err != nil {
|
||||
return attestationtypes.ID{}, err
|
||||
}
|
||||
|
||||
return attestationtypes.ID{Owner: ownerID, Cluster: clusterID}, nil
|
||||
}

func sshProtoKeysToMap(keys []*initproto.SSHUserKey) map[string]string {
	keyMap := make(map[string]string)
	for _, key := range keys {
		keyMap[key.Username] = key.PublicKey
	}
	return keyMap
}

// ClusterInitializer initializes a new Kubernetes cluster.
type ClusterInitializer interface {
	InitCluster(
		ctx context.Context,
		autoscalingNodeGroups []string,
		cloudServiceAccountURI string,
		kubernetesVersion string,
		id attestationtypes.ID,
		config kubernetes.KMSConfig,
		sshUserKeys map[string]string,
	) ([]byte, error)
}

// EncryptedDisk manages the encrypted state disk.
type EncryptedDisk interface {
	// Open prepares the underlying device for disk operations.
	Open() error
	// Close closes the underlying device.
	Close() error
	// UUID gets the device's UUID.
	UUID() (string, error)
	// UpdatePassphrase switches the initial random passphrase of the encrypted disk to a permanent passphrase.
	UpdatePassphrase(passphrase string) error
}

376	coordinator/internal/joinclient/client.go	Normal file
@ -0,0 +1,376 @@
package joinclient

import (
	"context"
	"errors"
	"fmt"
	"net"
	"strconv"
	"sync"
	"time"

	"github.com/edgelesssys/constellation/activation/activationproto"
	"github.com/edgelesssys/constellation/coordinator/diskencryption"
	"github.com/edgelesssys/constellation/coordinator/nodestate"
	"github.com/edgelesssys/constellation/coordinator/role"
	"github.com/edgelesssys/constellation/internal/cloud/metadata"
	"github.com/edgelesssys/constellation/internal/constants"
	"github.com/edgelesssys/constellation/internal/file"
	"github.com/spf13/afero"
	"go.uber.org/zap"
	"google.golang.org/grpc"
	"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
	"k8s.io/utils/clock"
)

const (
	interval = 30 * time.Second
	timeout  = 30 * time.Second
)

// JoinClient is a client for the self-activation of a node.
type JoinClient struct {
	nodeLock    *sync.Mutex
	diskUUID    string
	nodeName    string
	role        role.Role
	disk        EncryptedDisk
	fileHandler file.Handler

	timeout  time.Duration
	interval time.Duration
	clock    clock.WithTicker

	dialer      grpcDialer
	joiner      ClusterJoiner
	metadataAPI metadataAPI

	log *zap.Logger

	mux      sync.Mutex
	stopC    chan struct{}
	stopDone chan struct{}
}

// New creates a new JoinClient.
func New(nodeLock *sync.Mutex, dial grpcDialer, joiner ClusterJoiner, meta metadataAPI, log *zap.Logger) *JoinClient {
	return &JoinClient{
		disk:        diskencryption.New(),
		fileHandler: file.NewHandler(afero.NewOsFs()),
		timeout:     timeout,
		interval:    interval,
		clock:       clock.RealClock{},
		dialer:      dial,
		joiner:      joiner,
		metadataAPI: meta,
		log:         log.Named("selfactivation-client"),
	}
}
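
For orientation, a minimal sketch of how a caller might wire the client up. The dial, joiner, and metadataClient values here are placeholders for whatever the bootstrapping code actually injects; only New, Start, and Stop are taken from this file:

	// Hypothetical wiring; the concrete dialer, joiner, and metadata
	// implementations are not part of this package.
	nodeLock := &sync.Mutex{}
	client := joinclient.New(nodeLock, dial, joiner, metadataClient, zap.NewExample())
	client.Start()
	defer client.Stop() // blocks until the background routine has exited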

// Start starts the client routine. The client will make the needed API calls to activate
// the node in the role it receives from the metadata API.
// Multiple calls of Start on the same client won't start a second routine if there is
// already a routine running.
func (c *JoinClient) Start() {
	c.mux.Lock()
	defer c.mux.Unlock()

	if c.stopC != nil { // daemon already running
		return
	}

	c.log.Info("Starting")
	c.stopC = make(chan struct{}, 1)
	c.stopDone = make(chan struct{}, 1)

	ticker := c.clock.NewTicker(c.interval)
	go func() {
		defer ticker.Stop()
		defer func() { c.stopDone <- struct{}{} }()

		diskUUID, err := c.GetDiskUUID()
		if err != nil {
			c.log.Error("Failed to get disk UUID", zap.Error(err))
			c.log.Error("Stopping self-activation client")
			return
		}
		c.diskUUID = diskUUID

		for {
			err := c.getNodeMetadata()
			if err == nil {
				c.log.Info("Received own instance metadata", zap.String("role", c.role.String()), zap.String("name", c.nodeName))
				break
			}
			c.log.Info("Failed to retrieve instance metadata", zap.Error(err))

			c.log.Info("Sleeping", zap.Duration("interval", c.interval))
			select {
			case <-c.stopC:
				return
			case <-ticker.C():
			}
		}

		for {
			err := c.tryJoinAtAvailableServices()
			if err == nil {
				c.log.Info("Activated successfully. SelfActivationClient shut down.")
				return
			}
			c.log.Info("Activation failed for all available endpoints", zap.Error(err))

			c.log.Info("Sleeping", zap.Duration("interval", c.interval))
			select {
			case <-c.stopC:
				return
			case <-ticker.C():
			}
		}
	}()
}

// Stop stops the client and blocks until the client's routine is stopped.
func (c *JoinClient) Stop() {
	c.mux.Lock()
	defer c.mux.Unlock()

	if c.stopC == nil { // daemon not running
		return
	}

	c.log.Info("Stopping")

	c.stopC <- struct{}{}
	<-c.stopDone

	c.stopC = nil
	c.stopDone = nil

	c.log.Info("Stopped")
}

func (c *JoinClient) tryJoinAtAvailableServices() error {
	ips, err := c.getCoordinatorIPs()
	if err != nil {
		return err
	}

	if len(ips) == 0 {
		return errors.New("no coordinator IPs found")
	}

	for _, ip := range ips {
		err = c.join(net.JoinHostPort(ip, strconv.Itoa(constants.ActivationServiceNodePort)))
		if err == nil {
			return nil
		}
	}

	return err
}

func (c *JoinClient) join(serviceEndpoint string) error {
	ctx, cancel := c.timeoutCtx()
	defer cancel()

	conn, err := c.dialer.Dial(ctx, serviceEndpoint)
	if err != nil {
		c.log.Info("Join service unreachable", zap.String("endpoint", serviceEndpoint), zap.Error(err))
		return fmt.Errorf("dialing join service endpoint: %v", err)
	}
	defer conn.Close()

	protoClient := activationproto.NewAPIClient(conn)

	switch c.role {
	case role.Node:
		return c.joinAsWorkerNode(ctx, protoClient)
	case role.Coordinator:
		return c.joinAsControlPlaneNode(ctx, protoClient)
	default:
		return fmt.Errorf("cannot activate as %s", role.Unknown)
	}
}

func (c *JoinClient) joinAsWorkerNode(ctx context.Context, client activationproto.APIClient) error {
	req := &activationproto.ActivateWorkerNodeRequest{
		DiskUuid: c.diskUUID,
		NodeName: c.nodeName,
	}
	resp, err := client.ActivateWorkerNode(ctx, req)
	if err != nil {
		c.log.Info("Failed to activate as worker node", zap.Error(err))
		return fmt.Errorf("activating worker node: %w", err)
	}
	c.log.Info("Activation at AaaS succeeded")

	return c.startNodeAndJoin(
		ctx,
		resp.StateDiskKey,
		resp.OwnerId,
		resp.ClusterId,
		resp.KubeletKey,
		resp.KubeletCert,
		resp.ApiServerEndpoint,
		resp.Token,
		resp.DiscoveryTokenCaCertHash,
		"",
	)
}

func (c *JoinClient) joinAsControlPlaneNode(ctx context.Context, client activationproto.APIClient) error {
	req := &activationproto.ActivateControlPlaneNodeRequest{
		DiskUuid: c.diskUUID,
		NodeName: c.nodeName,
	}
	resp, err := client.ActivateControlPlaneNode(ctx, req)
	if err != nil {
		c.log.Info("Failed to activate as control plane node", zap.Error(err))
		return fmt.Errorf("activating control plane node: %w", err)
	}
	c.log.Info("Activation at AaaS succeeded")

	return c.startNodeAndJoin(
		ctx,
		resp.StateDiskKey,
		resp.OwnerId,
		resp.ClusterId,
		resp.KubeletKey,
		resp.KubeletCert,
		resp.ApiServerEndpoint,
		resp.Token,
		resp.DiscoveryTokenCaCertHash,
		resp.CertificateKey,
	)
}

func (c *JoinClient) startNodeAndJoin(ctx context.Context, diskKey, ownerID, clusterID, kubeletKey, kubeletCert []byte, endpoint, token,
	discoveryCACertHash, certKey string,
) error {
	if ok := c.nodeLock.TryLock(); !ok {
		return errors.New("node is already being initialized")
	}

	if err := c.updateDiskPassphrase(string(diskKey)); err != nil {
		return fmt.Errorf("updating disk passphrase: %w", err)
	}

	state := nodestate.NodeState{
		Role:      c.role,
		OwnerID:   ownerID,
		ClusterID: clusterID,
	}
	if err := state.ToFile(c.fileHandler); err != nil {
		return fmt.Errorf("persisting node state: %w", err)
	}

	btd := &kubeadm.BootstrapTokenDiscovery{
		APIServerEndpoint: endpoint,
		Token:             token,
		CACertHashes:      []string{discoveryCACertHash},
	}
	if err := c.joiner.JoinCluster(ctx, btd, certKey, c.role); err != nil {
		return fmt.Errorf("joining Kubernetes cluster: %w", err)
	}

	return nil
}
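
Note the TryLock with no matching Unlock: once initialization has begun, the lock stays held for the lifetime of the process, so a second activation attempt fails fast instead of blocking. A minimal sketch of that gate in isolation, with hypothetical names:

	var initLock sync.Mutex

	func beginInit() error {
		if !initLock.TryLock() {
			return errors.New("node is already being initialized")
		}
		// Deliberately never unlocked: at most one initialization per process.
		return nil
	}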

func (c *JoinClient) getNodeMetadata() error {
	ctx, cancel := c.timeoutCtx()
	defer cancel()

	c.log.Info("Requesting node metadata from metadata API")
	inst, err := c.metadataAPI.Self(ctx)
	if err != nil {
		return err
	}
	c.log.Info("Received node metadata", zap.Any("instance", inst))

	if inst.Name == "" {
		return errors.New("got instance metadata with empty name")
	}

	if inst.Role == role.Unknown {
		return errors.New("got instance metadata with unknown role")
	}

	c.nodeName = inst.Name
	c.role = inst.Role

	return nil
}

func (c *JoinClient) updateDiskPassphrase(passphrase string) error {
	if err := c.disk.Open(); err != nil {
		return fmt.Errorf("opening disk: %w", err)
	}
	defer c.disk.Close()
	return c.disk.UpdatePassphrase(passphrase)
}

// GetDiskUUID returns the UUID of the encrypted state disk.
func (c *JoinClient) GetDiskUUID() (string, error) {
	if err := c.disk.Open(); err != nil {
		return "", fmt.Errorf("opening disk: %w", err)
	}
	defer c.disk.Close()
	return c.disk.UUID()
}

func (c *JoinClient) getCoordinatorIPs() ([]string, error) {
	ctx, cancel := c.timeoutCtx()
	defer cancel()

	instances, err := c.metadataAPI.List(ctx)
	if err != nil {
		c.log.Error("Failed to list instances from metadata API", zap.Error(err))
		return nil, fmt.Errorf("listing instances from metadata API: %w", err)
	}

	ips := []string{}
	for _, instance := range instances {
		if instance.Role == role.Coordinator {
			ips = append(ips, instance.PrivateIPs...)
		}
	}

	c.log.Info("Received Coordinator endpoints", zap.Strings("IPs", ips))
	return ips, nil
}

func (c *JoinClient) timeoutCtx() (context.Context, context.CancelFunc) {
	return context.WithTimeout(context.Background(), c.timeout)
}

type grpcDialer interface {
	Dial(ctx context.Context, target string) (*grpc.ClientConn, error)
}

// ClusterJoiner joins a node to an existing Kubernetes cluster.
type ClusterJoiner interface {
	JoinCluster(
		ctx context.Context,
		args *kubeadm.BootstrapTokenDiscovery,
		certKey string,
		peerRole role.Role,
	) error
}

type metadataAPI interface {
	// List retrieves all instances belonging to the current constellation.
	List(ctx context.Context) ([]metadata.InstanceMetadata, error)
	// Self retrieves the current instance.
	Self(ctx context.Context) (metadata.InstanceMetadata, error)
}

// EncryptedDisk manages the encrypted state disk.
type EncryptedDisk interface {
	// Open prepares the underlying device for disk operations.
	Open() error
	// Close closes the underlying device.
	Close() error
	// UUID gets the device's UUID.
	UUID() (string, error)
	// UpdatePassphrase switches the initial random passphrase of the encrypted disk to a permanent passphrase.
	UpdatePassphrase(passphrase string) error
}
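
The grpcDialer contract above is what the tests below satisfy with the project's internal/grpc/dialer package. As a rough, non-authoritative sketch of a stand-in useful only for local experiments (the real client is expected to dial with attested-TLS credentials), a toy implementation could look like:

	// toyDialer is a hypothetical grpcDialer for experiments only.
	// It relies on google.golang.org/grpc/credentials/insecure.
	type toyDialer struct{}

	func (toyDialer) Dial(ctx context.Context, target string) (*grpc.ClientConn, error) {
		// insecure.NewCredentials() disables transport security; never
		// use this in place of the attested TLS setup.
		return grpc.DialContext(ctx, target,
			grpc.WithTransportCredentials(insecure.NewCredentials()))
	}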

327	coordinator/internal/joinclient/client_test.go	Normal file
@ -0,0 +1,327 @@
package joinclient

import (
	"context"
	"errors"
	"net"
	"strconv"
	"sync"
	"testing"
	"time"

	"github.com/edgelesssys/constellation/activation/activationproto"
	"github.com/edgelesssys/constellation/coordinator/role"
	"github.com/edgelesssys/constellation/internal/cloud/metadata"
	"github.com/edgelesssys/constellation/internal/constants"
	"github.com/edgelesssys/constellation/internal/file"
	"github.com/edgelesssys/constellation/internal/grpc/atlscredentials"
	"github.com/edgelesssys/constellation/internal/grpc/dialer"
	"github.com/edgelesssys/constellation/internal/grpc/testdialer"
	"github.com/spf13/afero"
	"go.uber.org/goleak"
	"go.uber.org/zap"
	"go.uber.org/zap/zaptest"
	"google.golang.org/grpc"
	"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
	testclock "k8s.io/utils/clock/testing"
)

func TestMain(m *testing.M) {
	goleak.VerifyTestMain(m)
}

func TestClient(t *testing.T) {
	someErr := errors.New("failed")
	self := metadata.InstanceMetadata{Role: role.Node, Name: "node-1"}
	peers := []metadata.InstanceMetadata{
		{Role: role.Node, Name: "node-2", PrivateIPs: []string{"192.0.2.8"}},
		{Role: role.Coordinator, Name: "node-3", PrivateIPs: []string{"192.0.2.1"}},
		{Role: role.Coordinator, Name: "node-4", PrivateIPs: []string{"192.0.2.2", "192.0.2.3"}},
	}

	testCases := map[string]struct {
		role          role.Role
		clusterJoiner ClusterJoiner
		disk          EncryptedDisk
		nodeLock      *sync.Mutex
		apiAnswers    []any
	}{
		"on node: metadata self: errors occur": {
			role: role.Node,
			apiAnswers: []any{
				selfAnswer{err: someErr},
				selfAnswer{err: someErr},
				selfAnswer{err: someErr},
				selfAnswer{instance: self},
				listAnswer{instances: peers},
				activateWorkerNodeAnswer{},
			},
			clusterJoiner: &stubClusterJoiner{},
			nodeLock:      &sync.Mutex{},
			disk:          &stubDisk{},
		},
		"on node: metadata self: invalid answer": {
			role: role.Node,
			apiAnswers: []any{
				selfAnswer{},
				selfAnswer{instance: metadata.InstanceMetadata{Role: role.Node}},
				selfAnswer{instance: metadata.InstanceMetadata{Name: "node-1"}},
				selfAnswer{instance: self},
				listAnswer{instances: peers},
				activateWorkerNodeAnswer{},
			},
			clusterJoiner: &stubClusterJoiner{},
			nodeLock:      &sync.Mutex{},
			disk:          &stubDisk{},
		},
		"on node: metadata list: errors occur": {
			role: role.Node,
			apiAnswers: []any{
				selfAnswer{instance: self},
				listAnswer{err: someErr},
				listAnswer{err: someErr},
				listAnswer{err: someErr},
				listAnswer{instances: peers},
				activateWorkerNodeAnswer{},
			},
			clusterJoiner: &stubClusterJoiner{},
			nodeLock:      &sync.Mutex{},
			disk:          &stubDisk{},
		},
		"on node: metadata list: no coordinators in answer": {
			role: role.Node,
			apiAnswers: []any{
				selfAnswer{instance: self},
				listAnswer{},
				listAnswer{},
				listAnswer{},
				listAnswer{instances: peers},
				activateWorkerNodeAnswer{},
			},
			clusterJoiner: &stubClusterJoiner{},
			nodeLock:      &sync.Mutex{},
			disk:          &stubDisk{},
		},
		"on node: aaas ActivateNode: errors": {
			role: role.Node,
			apiAnswers: []any{
				selfAnswer{instance: self},
				listAnswer{instances: peers},
				activateWorkerNodeAnswer{err: someErr},
				listAnswer{instances: peers},
				activateWorkerNodeAnswer{err: someErr},
				listAnswer{instances: peers},
				activateWorkerNodeAnswer{},
			},
			clusterJoiner: &stubClusterJoiner{},
			nodeLock:      &sync.Mutex{},
			disk:          &stubDisk{},
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			clock := testclock.NewFakeClock(time.Now())
			metadataAPI := newStubMetadataAPI()
			fileHandler := file.NewHandler(afero.NewMemMapFs())

			netDialer := testdialer.NewBufconnDialer()
			dialer := dialer.New(nil, nil, netDialer)

			client := &JoinClient{
				nodeLock:    tc.nodeLock,
				timeout:     30 * time.Second,
				interval:    time.Millisecond,
				dialer:      dialer,
				disk:        tc.disk,
				joiner:      tc.clusterJoiner,
				fileHandler: fileHandler,
				metadataAPI: metadataAPI,
				clock:       clock,
				log:         zaptest.NewLogger(t),
			}

			serverCreds := atlscredentials.New(nil, nil)
			activationServer := grpc.NewServer(grpc.Creds(serverCreds))
			activationAPI := newStubActivationServiceAPI()
			activationproto.RegisterAPIServer(activationServer, activationAPI)
			port := strconv.Itoa(constants.ActivationServiceNodePort)
			listener := netDialer.GetListener(net.JoinHostPort("192.0.2.3", port))
			go activationServer.Serve(listener)
			defer activationServer.GracefulStop()

			client.Start()

			for _, a := range tc.apiAnswers {
				switch a := a.(type) {
				case selfAnswer:
					metadataAPI.selfAnswerC <- a
				case listAnswer:
					metadataAPI.listAnswerC <- a
				case activateWorkerNodeAnswer:
					activationAPI.activateWorkerNodeAnswerC <- a
				}
				clock.Step(time.Second)
			}

			client.Stop()
		})
	}
}

func TestClientConcurrentStartStop(t *testing.T) {
	client := &JoinClient{
		metadataAPI: &stubRepeaterMetadataAPI{},
		clock:       testclock.NewFakeClock(time.Now()),
		log:         zap.NewNop(),
	}

	wg := sync.WaitGroup{}

	start := func() {
		defer wg.Done()
		client.Start()
	}

	stop := func() {
		defer wg.Done()
		client.Stop()
	}

	wg.Add(10)
	go stop()
	go start()
	go start()
	go stop()
	go stop()
	go start()
	go start()
	go stop()
	go stop()
	go start()
	wg.Wait()

	client.Stop()
}

type stubRepeaterMetadataAPI struct {
	selfInstance  metadata.InstanceMetadata
	selfErr       error
	listInstances []metadata.InstanceMetadata
	listErr       error
}

func (s *stubRepeaterMetadataAPI) Self(_ context.Context) (metadata.InstanceMetadata, error) {
	return s.selfInstance, s.selfErr
}

func (s *stubRepeaterMetadataAPI) List(_ context.Context) ([]metadata.InstanceMetadata, error) {
	return s.listInstances, s.listErr
}

type stubMetadataAPI struct {
	selfAnswerC chan selfAnswer
	listAnswerC chan listAnswer
}

func newStubMetadataAPI() *stubMetadataAPI {
	return &stubMetadataAPI{
		selfAnswerC: make(chan selfAnswer),
		listAnswerC: make(chan listAnswer),
	}
}

func (s *stubMetadataAPI) Self(_ context.Context) (metadata.InstanceMetadata, error) {
	answer := <-s.selfAnswerC
	return answer.instance, answer.err
}

func (s *stubMetadataAPI) List(_ context.Context) ([]metadata.InstanceMetadata, error) {
	answer := <-s.listAnswerC
	return answer.instances, answer.err
}

type selfAnswer struct {
	instance metadata.InstanceMetadata
	err      error
}

type listAnswer struct {
	instances []metadata.InstanceMetadata
	err       error
}

type stubActivationServiceAPI struct {
	activateWorkerNodeAnswerC       chan activateWorkerNodeAnswer
	activateControlPlaneNodeAnswerC chan activateControlPlaneNodeAnswer

	activationproto.UnimplementedAPIServer
}

func newStubActivationServiceAPI() *stubActivationServiceAPI {
	return &stubActivationServiceAPI{
		activateWorkerNodeAnswerC: make(chan activateWorkerNodeAnswer),
	}
}

func (s *stubActivationServiceAPI) ActivateWorkerNode(_ context.Context, _ *activationproto.ActivateWorkerNodeRequest,
) (*activationproto.ActivateWorkerNodeResponse, error) {
	answer := <-s.activateWorkerNodeAnswerC
	if answer.resp == nil {
		answer.resp = &activationproto.ActivateWorkerNodeResponse{}
	}
	return answer.resp, answer.err
}

func (s *stubActivationServiceAPI) ActivateControlPlaneNode(_ context.Context, _ *activationproto.ActivateControlPlaneNodeRequest,
) (*activationproto.ActivateControlPlaneNodeResponse, error) {
	answer := <-s.activateControlPlaneNodeAnswerC
	if answer.resp == nil {
		answer.resp = &activationproto.ActivateControlPlaneNodeResponse{}
	}
	return answer.resp, answer.err
}

type activateWorkerNodeAnswer struct {
	resp *activationproto.ActivateWorkerNodeResponse
	err  error
}

type activateControlPlaneNodeAnswer struct {
	resp *activationproto.ActivateControlPlaneNodeResponse
	err  error
}

type stubClusterJoiner struct {
	joinClusterErr error
}

func (j *stubClusterJoiner) JoinCluster(context.Context, *kubeadm.BootstrapTokenDiscovery, string, role.Role) error {
	return j.joinClusterErr
}

type stubDisk struct {
	openErr                error
	closeErr               error
	uuid                   string
	uuidErr                error
	updatePassphraseErr    error
	updatePassphraseCalled bool
}

func (d *stubDisk) Open() error {
	return d.openErr
}

func (d *stubDisk) Close() error {
	return d.closeErr
}

func (d *stubDisk) UUID() (string, error) {
	return d.uuid, d.uuidErr
}

func (d *stubDisk) UpdatePassphrase(string) error {
	d.updatePassphraseCalled = true
	return d.updatePassphraseErr
}
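
The tests above drive the client with unbuffered answer channels and a fake clock: each stub call blocks until the test hands it an answer, and the test then steps the clock to wake the client's retry ticker deterministically. A compact sketch of that pattern in isolation — the names here are illustrative, not part of the package:

	// The stub blocks until the test delivers an answer on demand.
	type pingAnswer struct{ err error }

	type stubPinger struct{ answerC chan pingAnswer }

	func (s *stubPinger) Ping(context.Context) error {
		a := <-s.answerC
		return a.err
	}

	// In the test body: send answers in lock-step, advancing the fake
	// clock between them so each retry iteration runs exactly once.
	//   stub.answerC <- pingAnswer{err: someErr}
	//   clock.Step(time.Second)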

@ -1,257 +0,0 @@
package selfactivation

import (
	"context"
	"errors"
	"fmt"
	"net"
	"strconv"
	"sync"
	"time"

	"github.com/edgelesssys/constellation/activation/activationproto"
	"github.com/edgelesssys/constellation/coordinator/cloudprovider/cloudtypes"
	"github.com/edgelesssys/constellation/coordinator/role"
	"github.com/edgelesssys/constellation/internal/constants"
	"go.uber.org/zap"
	"google.golang.org/grpc"
	"k8s.io/utils/clock"
)

const (
	interval = 30 * time.Second
	timeout  = 30 * time.Second
)

// SelfActivationClient is a client for the self-activation of a node.
type SelfActivationClient struct {
	diskUUID string
	role     role.Role

	timeout  time.Duration
	interval time.Duration
	clock    clock.WithTicker

	dialer      grpcDialer
	setterAPI   activeSetter
	metadataAPI metadataAPI

	log *zap.Logger

	mux      sync.Mutex
	stopC    chan struct{}
	stopDone chan struct{}
}

// NewClient creates a new SelfActivationClient.
func NewClient(diskUUID string, dial grpcDialer, setter activeSetter, meta metadataAPI, log *zap.Logger) *SelfActivationClient {
	return &SelfActivationClient{
		diskUUID:    diskUUID,
		timeout:     timeout,
		interval:    interval,
		clock:       clock.RealClock{},
		dialer:      dial,
		setterAPI:   setter,
		metadataAPI: meta,
		log:         log.Named("selfactivation-client"),
	}
}

// Start starts the client routine. The client will make the needed API calls to activate
// the node in the role it receives from the metadata API.
// Multiple calls of Start on the same client won't start a second routine if there is
// already a routine running.
func (c *SelfActivationClient) Start() {
	c.mux.Lock()
	defer c.mux.Unlock()

	if c.stopC != nil { // daemon already running
		return
	}

	c.log.Info("Starting")
	c.stopC = make(chan struct{}, 1)
	c.stopDone = make(chan struct{}, 1)

	ticker := c.clock.NewTicker(c.interval)
	go func() {
		defer ticker.Stop()
		defer func() { c.stopDone <- struct{}{} }()

		for {
			c.role = c.getRole()
			if c.role != role.Unknown {
				break
			}

			c.log.Info("Sleeping", zap.Duration("interval", c.interval))
			select {
			case <-c.stopC:
				return
			case <-ticker.C():
			}
		}

		// TODO(katexochen): Delete when Coordinator self-activation is implemented.
		if c.role == role.Coordinator {
			c.log.Info("Role is Coordinator, terminating")
			return
		}

		for {
			err := c.tryActivationAtAvailableServices()
			if err == nil {
				c.log.Info("Activated successfully. SelfActivationClient shut down.")
				return
			}
			c.log.Info("Activation failed for all available endpoints", zap.Error(err))

			c.log.Info("Sleeping", zap.Duration("interval", c.interval))
			select {
			case <-c.stopC:
				return
			case <-ticker.C():
			}
		}
	}()
}

// Stop stops the client and blocks until the client's routine is stopped.
func (c *SelfActivationClient) Stop() {
	c.mux.Lock()
	defer c.mux.Unlock()

	if c.stopC == nil { // daemon not running
		return
	}

	c.log.Info("Stopping")

	c.stopC <- struct{}{}
	<-c.stopDone

	c.stopC = nil
	c.stopDone = nil

	c.log.Info("Stopped")
}

func (c *SelfActivationClient) tryActivationAtAvailableServices() error {
	ips, err := c.getCoordinatorIPs()
	if err != nil {
		return err
	}

	if len(ips) == 0 {
		return errors.New("no coordinator IPs found")
	}

	for _, ip := range ips {
		err = c.activate(net.JoinHostPort(ip, strconv.Itoa(constants.ActivationServicePort)))
		if err == nil {
			return nil
		}
	}

	return err
}

func (c *SelfActivationClient) activate(aaasEndpoint string) error {
	ctx, cancel := c.timeoutCtx()
	defer cancel()

	conn, err := c.dialer.Dial(ctx, aaasEndpoint)
	if err != nil {
		c.log.Info("AaaS unreachable", zap.String("endpoint", aaasEndpoint), zap.Error(err))
		return fmt.Errorf("dialing AaaS endpoint: %v", err)
	}
	defer conn.Close()

	protoClient := activationproto.NewAPIClient(conn)

	switch c.role {
	case role.Node:
		return c.activateAsWorkerNode(ctx, protoClient)
	case role.Coordinator:
		return c.activateAsControlePlaneNode(ctx, protoClient)
	default:
		return fmt.Errorf("cannot activate as %s", role.Unknown)
	}
}

func (c *SelfActivationClient) activateAsWorkerNode(ctx context.Context, client activationproto.APIClient) error {
	req := &activationproto.ActivateWorkerNodeRequest{DiskUuid: c.diskUUID}
	resp, err := client.ActivateWorkerNode(ctx, req)
	if err != nil {
		c.log.Info("Failed to activate as node", zap.Error(err))
		return fmt.Errorf("activating node: %w", err)
	}
	c.log.Info("Activation at AaaS succeeded")

	return c.setterAPI.SetNodeActive(
		resp.StateDiskKey,
		resp.OwnerId,
		resp.ClusterId,
		resp.ApiServerEndpoint,
		resp.Token,
		resp.DiscoveryTokenCaCertHash,
	)
}

func (c *SelfActivationClient) activateAsControlePlaneNode(ctx context.Context, client activationproto.APIClient) error {
	panic("not implemented")
}

func (c *SelfActivationClient) getRole() role.Role {
	ctx, cancel := c.timeoutCtx()
	defer cancel()

	c.log.Info("Requesting role from metadata API")
	inst, err := c.metadataAPI.Self(ctx)
	if err != nil {
		c.log.Error("Failed to get self instance from metadata API", zap.Error(err))
		return role.Unknown
	}

	c.log.Info("Received new role", zap.String("role", inst.Role.String()))
	return inst.Role
}

func (c *SelfActivationClient) getCoordinatorIPs() ([]string, error) {
	ctx, cancel := c.timeoutCtx()
	defer cancel()

	instances, err := c.metadataAPI.List(ctx)
	if err != nil {
		c.log.Error("Failed to list instances from metadata API", zap.Error(err))
		return nil, fmt.Errorf("listing instances from metadata API: %w", err)
	}

	ips := []string{}
	for _, instance := range instances {
		if instance.Role == role.Coordinator {
			ips = append(ips, instance.PrivateIPs...)
		}
	}

	c.log.Info("Received Coordinator endpoints", zap.Strings("IPs", ips))
	return ips, nil
}

func (c *SelfActivationClient) timeoutCtx() (context.Context, context.CancelFunc) {
	return context.WithTimeout(context.Background(), c.timeout)
}

type grpcDialer interface {
	Dial(ctx context.Context, target string) (*grpc.ClientConn, error)
}

type activeSetter interface {
	SetNodeActive(diskKey, ownerID, clusterID []byte, endpoint, token, discoveryCACertHash string) error
	SetCoordinatorActive() error
}

type metadataAPI interface {
	Self(ctx context.Context) (cloudtypes.Instance, error)
	List(ctx context.Context) ([]cloudtypes.Instance, error)
}
@ -1,285 +0,0 @@
package selfactivation

import (
	"context"
	"errors"
	"net"
	"strconv"
	"sync"
	"testing"
	"time"

	"github.com/edgelesssys/constellation/activation/activationproto"
	"github.com/edgelesssys/constellation/coordinator/cloudprovider/cloudtypes"
	"github.com/edgelesssys/constellation/coordinator/role"
	"github.com/edgelesssys/constellation/internal/constants"
	"github.com/edgelesssys/constellation/internal/grpc/atlscredentials"
	"github.com/edgelesssys/constellation/internal/grpc/dialer"
	"github.com/edgelesssys/constellation/internal/grpc/testdialer"
	"github.com/stretchr/testify/assert"
	"go.uber.org/goleak"
	"go.uber.org/zap"
	"go.uber.org/zap/zaptest"
	"google.golang.org/grpc"
	testclock "k8s.io/utils/clock/testing"
)

func TestMain(m *testing.M) {
	goleak.VerifyTestMain(m)
}

func TestClient(t *testing.T) {
	someErr := errors.New("failed")
	peers := []cloudtypes.Instance{
		{Role: role.Node, PrivateIPs: []string{"192.0.2.8"}},
		{Role: role.Coordinator, PrivateIPs: []string{"192.0.2.1"}},
		{Role: role.Coordinator, PrivateIPs: []string{"192.0.2.2", "192.0.2.3"}},
	}

	testCases := map[string]struct {
		role       role.Role
		apiAnswers []any
		setterAPI  *stubActiveSetter
	}{
		"on node: metadata self: errors occur": {
			role: role.Node,
			apiAnswers: []any{
				selfAnswer{err: someErr},
				selfAnswer{err: someErr},
				selfAnswer{err: someErr},
				selfAnswer{instance: cloudtypes.Instance{Role: role.Node}},
				listAnswer{instances: peers},
				activateNodeAnswer{},
			},
			setterAPI: &stubActiveSetter{},
		},
		"on node: metadata self: no role in answer": {
			role: role.Node,
			apiAnswers: []any{
				selfAnswer{},
				selfAnswer{},
				selfAnswer{},
				selfAnswer{instance: cloudtypes.Instance{Role: role.Node}},
				listAnswer{instances: peers},
				activateNodeAnswer{},
			},
			setterAPI: &stubActiveSetter{},
		},
		"on node: metadata list: errors occur": {
			role: role.Node,
			apiAnswers: []any{
				selfAnswer{instance: cloudtypes.Instance{Role: role.Node}},
				listAnswer{err: someErr},
				listAnswer{err: someErr},
				listAnswer{err: someErr},
				listAnswer{instances: peers},
				activateNodeAnswer{},
			},
			setterAPI: &stubActiveSetter{},
		},
		"on node: metadata list: no coordinators in answer": {
			role: role.Node,
			apiAnswers: []any{
				selfAnswer{instance: cloudtypes.Instance{Role: role.Node}},
				listAnswer{},
				listAnswer{},
				listAnswer{},
				listAnswer{instances: peers},
				activateNodeAnswer{},
			},
			setterAPI: &stubActiveSetter{},
		},
		"on node: aaas ActivateNode: errors": {
			role: role.Node,
			apiAnswers: []any{
				selfAnswer{instance: cloudtypes.Instance{Role: role.Node}},
				listAnswer{instances: peers},
				activateNodeAnswer{err: someErr},
				listAnswer{instances: peers},
				activateNodeAnswer{err: someErr},
				listAnswer{instances: peers},
				activateNodeAnswer{},
			},
			setterAPI: &stubActiveSetter{},
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)

			clock := testclock.NewFakeClock(time.Now())
			metadataAPI := newStubMetadataAPI()

			netDialer := testdialer.NewBufconnDialer()
			dialer := dialer.New(nil, nil, netDialer)

			client := &SelfActivationClient{
				timeout:     30 * time.Second,
				interval:    time.Millisecond,
				dialer:      dialer,
				setterAPI:   tc.setterAPI,
				metadataAPI: metadataAPI,
				clock:       clock,
				log:         zaptest.NewLogger(t),
			}

			serverCreds := atlscredentials.New(nil, nil)
			activationSever := grpc.NewServer(grpc.Creds(serverCreds))
			activationAPI := newStubActivationServiceAPI()
			activationproto.RegisterAPIServer(activationSever, activationAPI)
			port := strconv.Itoa(constants.ActivationServicePort)
			listener := netDialer.GetListener(net.JoinHostPort("192.0.2.3", port))
			go activationSever.Serve(listener)
			defer activationSever.GracefulStop()

			client.Start()

			for _, a := range tc.apiAnswers {
				switch a := a.(type) {
				case selfAnswer:
					metadataAPI.selfAnswerC <- a
				case listAnswer:
					metadataAPI.listAnswerC <- a
				case activateNodeAnswer:
					activationAPI.activateNodeAnswerC <- a
				}
				clock.Step(time.Second)
			}

			client.Stop()

			if tc.role == role.Node {
				assert.Equal(1, tc.setterAPI.setNodeActiveCalled)
			} else {
				assert.Equal(1, tc.setterAPI.setCoordinatorActiveCalled)
			}
		})
	}
}

func TestClientConcurrentStartStop(t *testing.T) {
	client := &SelfActivationClient{
		setterAPI:   &stubActiveSetter{},
		metadataAPI: &stubRepeaterMetadataAPI{},
		clock:       testclock.NewFakeClock(time.Now()),
		log:         zap.NewNop(),
	}

	wg := sync.WaitGroup{}

	start := func() {
		defer wg.Done()
		client.Start()
	}

	stop := func() {
		defer wg.Done()
		client.Stop()
	}

	wg.Add(10)
	go stop()
	go start()
	go start()
	go stop()
	go stop()
	go start()
	go start()
	go stop()
	go stop()
	go start()
	wg.Wait()

	client.Stop()
}

type stubActiveSetter struct {
	setNodeActiveErr           error
	setNodeActiveCalled        int
	setCoordinatorActiveErr    error
	setCoordinatorActiveCalled int
}

func (s *stubActiveSetter) SetNodeActive(_, _, _ []byte, _, _, _ string) error {
	s.setNodeActiveCalled++
	return s.setNodeActiveErr
}

func (s *stubActiveSetter) SetCoordinatorActive() error {
	s.setCoordinatorActiveCalled++
	return s.setCoordinatorActiveErr
}

type stubRepeaterMetadataAPI struct {
	selfInstance  cloudtypes.Instance
	selfErr       error
	listInstances []cloudtypes.Instance
	listErr       error
}

func (s *stubRepeaterMetadataAPI) Self(_ context.Context) (cloudtypes.Instance, error) {
	return s.selfInstance, s.selfErr
}

func (s *stubRepeaterMetadataAPI) List(_ context.Context) ([]cloudtypes.Instance, error) {
	return s.listInstances, s.listErr
}

type stubMetadataAPI struct {
	selfAnswerC chan selfAnswer
	listAnswerC chan listAnswer
}

func newStubMetadataAPI() *stubMetadataAPI {
	return &stubMetadataAPI{
		selfAnswerC: make(chan selfAnswer),
		listAnswerC: make(chan listAnswer),
	}
}

func (s *stubMetadataAPI) Self(_ context.Context) (cloudtypes.Instance, error) {
	answer := <-s.selfAnswerC
	return answer.instance, answer.err
}

func (s *stubMetadataAPI) List(_ context.Context) ([]cloudtypes.Instance, error) {
	answer := <-s.listAnswerC
	return answer.instances, answer.err
}

type selfAnswer struct {
	instance cloudtypes.Instance
	err      error
}

type listAnswer struct {
	instances []cloudtypes.Instance
	err       error
}

type stubActivationServiceAPI struct {
	activateNodeAnswerC chan activateNodeAnswer

	activationproto.UnimplementedAPIServer
}

func newStubActivationServiceAPI() *stubActivationServiceAPI {
	return &stubActivationServiceAPI{
		activateNodeAnswerC: make(chan activateNodeAnswer),
	}
}

func (s *stubActivationServiceAPI) ActivateWorkerNode(_ context.Context, _ *activationproto.ActivateWorkerNodeRequest,
) (*activationproto.ActivateWorkerNodeResponse, error) {
	answer := <-s.activateNodeAnswerC
	if answer.resp == nil {
		answer.resp = &activationproto.ActivateWorkerNodeResponse{}
	}
	return answer.resp, answer.err
}

type activateNodeAnswer struct {
	resp *activationproto.ActivateWorkerNodeResponse
	err  error
}
@ -11,6 +11,7 @@ import (
	"github.com/edgelesssys/constellation/coordinator/kubernetes/k8sapi"
	"github.com/edgelesssys/constellation/coordinator/kubernetes/k8sapi/resources"
	"github.com/edgelesssys/constellation/coordinator/role"
	"github.com/edgelesssys/constellation/coordinator/util"
	attestationtypes "github.com/edgelesssys/constellation/internal/attestation/types"
	"github.com/spf13/afero"
	kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"

@ -59,16 +60,29 @@ func New(cloudProvider string, clusterUtil clusterUtil, configProvider configura
	}
}

type KMSConfig struct {
	MasterSecret       []byte
	KMSURI             string
	StorageURI         string
	KeyEncryptionKeyID string
	UseExistingKEK     bool
}

// InitCluster initializes a new Kubernetes cluster and applies pod network provider.
func (k *KubeWrapper) InitCluster(
	ctx context.Context, autoscalingNodeGroups []string, cloudServiceAccountURI, vpnIP string, id attestationtypes.ID, masterSecret []byte, sshUsers map[string]string,
	ctx context.Context, autoscalingNodeGroups []string, cloudServiceAccountURI, k8sVersion string,
	id attestationtypes.ID, kmsConfig KMSConfig, sshUsers map[string]string,
) error {
	// TODO: k8s version should be user input
	if err := k.clusterUtil.InstallComponents(context.TODO(), "1.23.6"); err != nil {
	if err := k.clusterUtil.InstallComponents(ctx, k8sVersion); err != nil {
		return err
	}

	nodeName := vpnIP
	ip, err := util.GetIPAddr()
	if err != nil {
		return err
	}
	nodeName := ip
	var providerID string
	var instance cloudtypes.Instance
	var publicIP string

@ -77,11 +91,10 @@ func (k *KubeWrapper) InitCluster(
	// this is the IP in "kubeadm init --control-plane-endpoint=<IP/DNS>:<port>" hence the unfortunate name
	var controlPlaneEndpointIP string
	var nodeIP string
	var err error

	// Step 1: retrieve cloud metadata for Kubernetes configuration
	if k.providerMetadata.Supported() {
		instance, err = k.providerMetadata.Self(context.TODO())
		instance, err = k.providerMetadata.Self(ctx)
		if err != nil {
			return fmt.Errorf("retrieving own instance metadata failed: %w", err)
		}

@ -96,13 +109,13 @@ func (k *KubeWrapper) InitCluster(
	if len(instance.AliasIPRanges) > 0 {
		nodePodCIDR = instance.AliasIPRanges[0]
	}
	subnetworkPodCIDR, err = k.providerMetadata.GetSubnetworkCIDR(context.TODO())
	subnetworkPodCIDR, err = k.providerMetadata.GetSubnetworkCIDR(ctx)
	if err != nil {
		return fmt.Errorf("retrieving subnetwork CIDR failed: %w", err)
	}
	controlPlaneEndpointIP = publicIP
	if k.providerMetadata.SupportsLoadBalancer() {
		controlPlaneEndpointIP, err = k.providerMetadata.GetLoadBalancerIP(context.TODO())
		controlPlaneEndpointIP, err = k.providerMetadata.GetLoadBalancerIP(ctx)
		if err != nil {
			return fmt.Errorf("retrieving load balancer IP failed: %w", err)
		}

@ -142,7 +155,7 @@ func (k *KubeWrapper) InitCluster(
		return fmt.Errorf("setting up pod network: %w", err)
	}

	kms := resources.NewKMSDeployment(k.cloudProvider, masterSecret)
	kms := resources.NewKMSDeployment(k.cloudProvider, kmsConfig.MasterSecret)
	if err = k.clusterUtil.SetupKMS(k.client, kms); err != nil {
		return fmt.Errorf("setting up kms: %w", err)
	}

@ -151,7 +164,7 @@ func (k *KubeWrapper) InitCluster(
		return fmt.Errorf("setting up activation service failed: %w", err)
	}

	if err := k.setupCCM(context.TODO(), vpnIP, subnetworkPodCIDR, cloudServiceAccountURI, instance); err != nil {
	if err := k.setupCCM(ctx, subnetworkPodCIDR, cloudServiceAccountURI, instance); err != nil {
		return fmt.Errorf("setting up cloud controller manager: %w", err)
	}
	if err := k.setupCloudNodeManager(); err != nil {

@ -202,12 +215,6 @@ func (k *KubeWrapper) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTo
	}
	nodeName = k8sCompliantHostname(nodeName)

	if k.cloudControllerManager.Supported() && k.providerMetadata.Supported() {
		if err := k.prepareInstanceForCCM(context.TODO(), nodeVPNIP); err != nil {
			return fmt.Errorf("preparing node for CCM failed: %w", err)
		}
	}

	// Step 2: configure kubeadm join config

	joinConfig := k.configProvider.JoinConfiguration(k.cloudControllerManager.Supported())

@ -267,13 +274,10 @@ func (k *KubeWrapper) setupActivationService(csp string, measurementsJSON []byte
	return k.clusterUtil.SetupActivationService(k.client, activationConfiguration)
}

func (k *KubeWrapper) setupCCM(ctx context.Context, vpnIP, subnetworkPodCIDR, cloudServiceAccountURI string, instance cloudtypes.Instance) error {
func (k *KubeWrapper) setupCCM(ctx context.Context, subnetworkPodCIDR, cloudServiceAccountURI string, instance cloudtypes.Instance) error {
	if !k.cloudControllerManager.Supported() {
		return nil
	}
	if err := k.prepareInstanceForCCM(context.TODO(), vpnIP); err != nil {
		return fmt.Errorf("preparing node for CCM failed: %w", err)
	}
	ccmConfigMaps, err := k.cloudControllerManager.ConfigMaps(instance)
	if err != nil {
		return fmt.Errorf("defining ConfigMaps for CCM failed: %w", err)

@ -326,14 +330,6 @@ func (k *KubeWrapper) setupClusterAutoscaler(instance cloudtypes.Instance, cloud
	return nil
}

// prepareInstanceForCCM sets the vpn IP in cloud provider metadata.
func (k *KubeWrapper) prepareInstanceForCCM(ctx context.Context, vpnIP string) error {
	if err := k.providerMetadata.SetVPNIP(ctx, vpnIP); err != nil {
		return fmt.Errorf("setting VPN IP for cloud-controller-manager failed: %w", err)
	}
	return nil
}

// k8sCompliantHostname transforms a hostname to an RFC 1123 compliant, lowercase subdomain as required by Kubernetes node names.
// The following regex is used by k8s for validation: /^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$/ .
// Only a simple heuristic is used for now (to lowercase, replace underscores).
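
The heuristic that comment describes is small enough to show inline; a sketch consistent with it (lowercase, underscores to hyphens — the exact replacement set in the real function is not visible in this hunk):

	// k8sCompliantHostname-style normalization, illustrative only.
	func toK8sHostname(hostname string) string {
		hostname = strings.ToLower(hostname)
		return strings.ReplaceAll(hostname, "_", "-")
	}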

@ -35,6 +35,7 @@ func TestInitCluster(t *testing.T) {
	publicIP := "192.0.2.2"
	loadbalancerIP := "192.0.2.3"
	aliasIPRange := "192.0.2.0/24"
	k8sVersion := "1.23.8"

	testCases := map[string]struct {
		clusterUtil stubClusterUtil

@ -267,7 +268,7 @@ func TestInitCluster(t *testing.T) {
		client:           &tc.kubeCTL,
		kubeconfigReader: tc.kubeconfigReader,
	}
	err := kube.InitCluster(context.Background(), autoscalingNodeGroups, serviceAccountUri, coordinatorVPNIP, attestationtypes.ID{}, masterSecret, nil)
	err := kube.InitCluster(context.Background(), autoscalingNodeGroups, serviceAccountUri, k8sVersion, attestationtypes.ID{}, KMSConfig{MasterSecret: masterSecret}, nil)

	if tc.wantErr {
		assert.Error(err)

@ -12,11 +12,9 @@ const nodeStatePath = "/run/state/constellation/node_state.json"
// NodeState is the state of a constellation node that is required to recover from a reboot.
// Can be persisted to disk and reloaded later.
type NodeState struct {
	Role       role.Role
	VPNIP      string
	VPNPrivKey []byte
	OwnerID    []byte
	ClusterID  []byte
	Role      role.Role
	OwnerID   []byte
	ClusterID []byte
}
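
With VPNIP and VPNPrivKey removed, the persisted node_state.json reduces to the three remaining fields; the test fixture below encodes exactly this shape:

	{ "Role": "Coordinator", "OwnerID": "T3duZXJJRA==", "ClusterID": "Q2x1c3RlcklE" }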

// FromFile reads a NodeState from disk.

@ -23,13 +23,11 @@ func TestFromFile(t *testing.T) {
		wantErr bool
	}{
		"nodestate exists": {
			fileContents: `{ "Role": "Coordinator", "VPNIP": "192.0.2.1", "VPNPrivKey": "dGVzdA==", "OwnerID": "T3duZXJJRA==", "ClusterID": "Q2x1c3RlcklE" }`,
			fileContents: `{ "Role": "Coordinator", "OwnerID": "T3duZXJJRA==", "ClusterID": "Q2x1c3RlcklE" }`,
			wantState: &NodeState{
				Role:       role.Coordinator,
				VPNIP:      "192.0.2.1",
				VPNPrivKey: []byte("test"),
				OwnerID:    []byte("OwnerID"),
				ClusterID:  []byte("ClusterID"),
				Role:      role.Coordinator,
				OwnerID:   []byte("OwnerID"),
				ClusterID: []byte("ClusterID"),
			},
		},
		"nodestate file does not exist": {

@ -68,16 +66,12 @@ func TestToFile(t *testing.T) {
	}{
		"writing works": {
			state: &NodeState{
				Role:       role.Coordinator,
				VPNIP:      "192.0.2.1",
				VPNPrivKey: []byte("test"),
				OwnerID:    []byte("OwnerID"),
				ClusterID:  []byte("ClusterID"),
				Role:      role.Coordinator,
				OwnerID:   []byte("OwnerID"),
				ClusterID: []byte("ClusterID"),
			},
			wantFile: `{
	"Role": "Coordinator",
	"VPNIP": "192.0.2.1",
	"VPNPrivKey": "dGVzdA==",
	"OwnerID": "T3duZXJJRA==",
	"ClusterID": "Q2x1c3RlcklE"
}`,
@ -1,72 +0,0 @@
package peer

import (
	"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
	"github.com/edgelesssys/constellation/coordinator/role"
	"github.com/edgelesssys/constellation/coordinator/vpnapi/vpnproto"
)

// Peer holds all information about a peer.
type Peer struct {
	// PublicIP is the public IP address on which the peer is reachable.
	PublicIP string
	// VPNIP holds the internal VPN address; it can only be used within the VPN,
	// and some gRPC services may only be reachable from this IP.
	VPNIP string
	// VPNPubKey contains the public key used for cryptographic purposes in the VPN.
	VPNPubKey []byte
	// Role is the peer's role (Coordinator, Node or Admin).
	Role role.Role
}

func FromPubProto(peers []*pubproto.Peer) []Peer {
	var result []Peer
	for _, p := range peers {
		result = append(result, Peer{
			PublicIP:  p.PublicIp,
			VPNIP:     p.VpnIp,
			VPNPubKey: p.VpnPubKey,
			Role:      role.Role(p.Role),
		})
	}
	return result
}

func ToPubProto(peers []Peer) []*pubproto.Peer {
	var result []*pubproto.Peer
	for _, p := range peers {
		result = append(result, &pubproto.Peer{
			PublicIp:  p.PublicIP,
			VpnIp:     p.VPNIP,
			VpnPubKey: p.VPNPubKey,
			Role:      uint32(p.Role),
		})
	}
	return result
}

func FromVPNProto(peers []*vpnproto.Peer) []Peer {
	var result []Peer
	for _, p := range peers {
		result = append(result, Peer{
			PublicIP:  p.PublicIp,
			VPNIP:     p.VpnIp,
			VPNPubKey: p.VpnPubKey,
			Role:      role.Role(p.Role),
		})
	}
	return result
}

func ToVPNProto(peers []Peer) []*vpnproto.Peer {
	var result []*vpnproto.Peer
	for _, p := range peers {
		result = append(result, &vpnproto.Peer{
			PublicIp:  p.PublicIP,
			VpnIp:     p.VPNIP,
			VpnPubKey: p.VPNPubKey,
			Role:      uint32(p.Role),
		})
	}
	return result
}
@ -1,481 +0,0 @@
|
||||
package pubapi
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/edgelesssys/constellation/coordinator/config"
|
||||
"github.com/edgelesssys/constellation/coordinator/peer"
|
||||
"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
|
||||
"github.com/edgelesssys/constellation/coordinator/role"
|
||||
"github.com/edgelesssys/constellation/coordinator/state"
|
||||
attestationtypes "github.com/edgelesssys/constellation/internal/attestation/types"
|
||||
"github.com/edgelesssys/constellation/internal/deploy/ssh"
|
||||
"github.com/edgelesssys/constellation/state/keyservice/keyproto"
|
||||
"go.uber.org/zap"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// ActivateAsCoordinator is the RPC call to activate the Coordinator.
|
||||
func (a *API) ActivateAsCoordinator(in *pubproto.ActivateAsCoordinatorRequest, srv pubproto.API_ActivateAsCoordinatorServer) (reterr error) {
|
||||
a.mut.Lock()
|
||||
defer a.mut.Unlock()
|
||||
|
||||
a.cloudLogger.Disclose("ActivateAsCoordinator called.")
|
||||
|
||||
if err := a.core.RequireState(state.AcceptingInit); err != nil {
|
||||
return status.Errorf(codes.FailedPrecondition, "node is not in required state: %v", err)
|
||||
}
|
||||
|
||||
if len(in.MasterSecret) == 0 {
|
||||
a.logger.Error("missing master secret")
|
||||
return status.Error(codes.InvalidArgument, "missing master secret")
|
||||
}
|
||||
|
||||
logToCLI := a.newLogToCLIFunc(func(msg string) error {
|
||||
return srv.Send(&pubproto.ActivateAsCoordinatorResponse{
|
||||
Content: &pubproto.ActivateAsCoordinatorResponse_Log{
|
||||
Log: &pubproto.Log{
|
||||
Message: msg,
|
||||
},
|
||||
},
|
||||
})
|
||||
})
|
||||
|
||||
logToCLI("Initializing first control-plane node ...")
|
||||
|
||||
// If any of the following actions fail, we cannot revert
|
||||
// Thus, mark this peer as failed.
|
||||
defer func() {
|
||||
if reterr != nil {
|
||||
_ = a.core.AdvanceState(state.Failed, nil, nil)
|
||||
}
|
||||
}()
|
||||
|
||||
// AdvanceState MUST be called before any other functions that are not sanity checks or otherwise required
|
||||
// This ensures the node is marked as initialzed before the node is in a state that allows code execution
|
||||
// Any new additions to ActivateAsNode MUST come after
|
||||
if err := a.core.InitializeStoreIPs(); err != nil {
|
||||
return status.Errorf(codes.Internal, "initialize store IPs: %v", err)
|
||||
}
|
||||
|
||||
ownerID, clusterID, err := a.core.GetIDs(in.MasterSecret)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Internal, "%v", err)
|
||||
}
|
||||
if err := a.core.AdvanceState(state.ActivatingNodes, ownerID, clusterID); err != nil {
|
||||
return status.Errorf(codes.Internal, "advance state to ActivatingNodes: %v", err)
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
|
||||
defer cancel()
|
||||
if err := a.core.SetUpKMS(ctx, in.StorageUri, in.KmsUri, in.KeyEncryptionKeyId, in.UseExistingKek); err != nil {
|
||||
return status.Errorf(codes.Internal, "setting up KMS: %v", err)
|
||||
}
|
||||
vpnIP, err := a.core.GetNextCoordinatorIP()
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Internal, "get coordinator vpn IP address: %v", err)
|
||||
}
|
||||
	coordPeer, err := a.assemblePeerStruct(vpnIP, role.Coordinator)
	if err != nil {
		return status.Errorf(codes.Internal, "assembling the coordinator peer struct: %v", err)
	}

	if err := a.core.SetVPNIP(coordPeer.VPNIP); err != nil {
		return status.Errorf(codes.Internal, "set the vpn IP address: %v", err)
	}
	if err := a.core.AddPeer(coordPeer); err != nil {
		return status.Errorf(codes.Internal, "adding the coordinator to store/vpn: %v", err)
	}

	// Set up SSH users for the first coordinator, if defined
	if len(in.SshUserKeys) != 0 {
		logToCLI("Creating SSH users on first control-plane node...")
		sshUserKeys := ssh.FromProtoSlice(in.SshUserKeys)
		if err := a.core.CreateSSHUsers(sshUserKeys); err != nil {
			return status.Errorf(codes.Internal, "creating SSH users: %v", err)
		}
	}

	logToCLI("Initializing Kubernetes ...")
	id := attestationtypes.ID{Owner: ownerID, Cluster: clusterID}
	kubeconfig, err := a.core.InitCluster(context.TODO(), in.AutoscalingNodeGroups, in.CloudServiceAccountUri, id, in.MasterSecret, in.SshUserKeys)
	if err != nil {
		return status.Errorf(codes.Internal, "initializing Kubernetes cluster: %v", err)
	}

	// run the VPN-API server
	if err := a.StartVPNAPIServer(coordPeer.VPNIP); err != nil {
		return status.Errorf(codes.Internal, "start vpnAPIServer: %v", err)
	}
	if err := a.core.SwitchToPersistentStore(); err != nil {
		return status.Errorf(codes.Internal, "switch to persistent store: %v", err)
	}

	// TODO: check performance and maybe make concurrent
	if err := a.activateCoordinators(logToCLI, in.CoordinatorPublicIps, in.SshUserKeys); err != nil {
		a.logger.Error("coordinator activation failed", zap.Error(err))
		return status.Errorf(codes.Internal, "coordinator initialization: %v", err)
	}
	// TODO: check performance and maybe make concurrent
	if err := a.activateNodes(logToCLI, in.NodePublicIps, in.SshUserKeys); err != nil {
		a.logger.Error("node activation failed", zap.Error(err))
		return status.Errorf(codes.Internal, "node initialization: %v", err)
	}
	// persist node state on disk
	if err := a.core.PersistNodeState(role.Coordinator, coordPeer.VPNIP, ownerID, clusterID); err != nil {
		return status.Errorf(codes.Internal, "persist node state: %v", err)
	}
	diskUUID, err := a.core.GetDiskUUID()
	if err != nil {
		return status.Errorf(codes.Internal, "getting disk uuid: %v", err)
	}
	diskKey, err := a.core.GetDataKey(ctx, diskUUID, 32)
	if err != nil {
		return status.Errorf(codes.Internal, "getting disk key: %v", err)
	}
	if err := a.core.UpdateDiskPassphrase(string(diskKey)); err != nil {
		return status.Errorf(codes.Internal, "updating disk key: %v", err)
	}

	adminVPNIP, err := a.core.GetNextNodeIP()
	if err != nil {
		return status.Errorf(codes.Internal, "requesting node IP address: %v", err)
	}
	// This effectively gives code execution, so we do this last.
	err = a.core.AddPeer(peer.Peer{
		VPNIP:     adminVPNIP,
		VPNPubKey: in.AdminVpnPubKey,
		Role:      role.Admin,
	})
	if err != nil {
		return status.Errorf(codes.Internal, "add peer to store/vpn: %v", err)
	}

	return srv.Send(&pubproto.ActivateAsCoordinatorResponse{
		Content: &pubproto.ActivateAsCoordinatorResponse_AdminConfig{
			AdminConfig: &pubproto.AdminConfig{
				AdminVpnIp:           adminVPNIP,
				CoordinatorVpnPubKey: coordPeer.VPNPubKey,
				Kubeconfig:           kubeconfig,
				OwnerId:              ownerID,
				ClusterId:            clusterID,
			},
		},
	})
}

// ActivateAdditionalNodes is the RPC call to activate additional nodes.
func (a *API) ActivateAdditionalNodes(in *pubproto.ActivateAdditionalNodesRequest, srv pubproto.API_ActivateAdditionalNodesServer) error {
	a.cloudLogger.Disclose("ActivateAdditionalNodes called.")

	if err := a.core.RequireState(state.ActivatingNodes); err != nil {
		return status.Errorf(codes.FailedPrecondition, "%v", err)
	}

	logToCLI := a.newLogToCLIFunc(func(msg string) error {
		return srv.Send(&pubproto.ActivateAdditionalNodesResponse{
			Log: &pubproto.Log{
				Message: msg,
			},
		})
	})

	// TODO: check performance and maybe make concurrent
	if err := a.activateNodes(logToCLI, in.NodePublicIps, in.SshUserKeys); err != nil {
		a.logger.Error("node activation failed", zap.Error(err))
		return status.Errorf(codes.Internal, "activating nodes: %v", err)
	}

	return srv.Send(&pubproto.ActivateAdditionalNodesResponse{
		Log: &pubproto.Log{
			Message: "success",
		},
	})
}

// RequestStateDiskKey triggers the Coordinator to return a key derived from the Constellation's master secret to the caller.
func (a *API) RequestStateDiskKey(ctx context.Context, in *pubproto.RequestStateDiskKeyRequest) (*pubproto.RequestStateDiskKeyResponse, error) {
	a.cloudLogger.Disclose("RequestStateDiskKey called.")
	if err := a.core.RequireState(state.ActivatingNodes); err != nil {
		return nil, status.Errorf(codes.FailedPrecondition, "%v", err)
	}
	key, err := a.core.GetDataKey(ctx, in.DiskUuid, config.RNGLengthDefault)
	if err != nil {
		return nil, status.Errorf(codes.Internal, "getting data key: %v", err)
	}

	peer, err := a.peerFromContext(ctx)
	if err != nil {
		return nil, status.Errorf(codes.Internal, "%v", err)
	}

	conn, err := a.dialer.Dial(ctx, peer)
	if err != nil {
		return nil, status.Errorf(codes.Internal, "%v", err)
	}
	defer conn.Close()

	client := keyproto.NewAPIClient(conn)
	if _, err := client.PushStateDiskKey(ctx, &keyproto.PushStateDiskKeyRequest{StateDiskKey: key}); err != nil {
		return nil, status.Errorf(codes.Internal, "pushing key to peer %q: %v", peer, err)
	}

	return &pubproto.RequestStateDiskKeyResponse{}, nil
}

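// activateNodes activates the given worker nodes one by one: each node is assigned the next
// free VPN IP, activated via the ActivateAsNode stream, registered as a peer, and finally
// told to join the Kubernetes cluster.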
func (a *API) activateNodes(logToCLI logFunc, nodePublicIPs []string, sshUserKeys []*pubproto.SSHUserKey) error {
	_, peers, err := a.core.GetPeers(0)
	if err != nil {
		return err
	}
	// we need to include at least all coordinators in the initial peers for HA
	initialPeers := peer.ToPubProto(peers)

	ownerID, clusterID, err := a.core.GetIDs(nil)
	if err != nil {
		return err
	}

	// Activate all nodes.
	for num, nodePublicIP := range nodePublicIPs {
		logToCLI("Activating worker node %3d out of %3d ...", num+1, len(nodePublicIPs))
		nodeVPNIP, err := a.core.GetNextNodeIP()
		if err != nil {
			a.logger.Error("generation of vpn ips failed", zap.Error(err))
			return err
		}
		nodeVpnPubKey, err := a.activateNode(nodePublicIP, nodeVPNIP, initialPeers, ownerID, clusterID, sshUserKeys)
		if err != nil {
			return err
		}
		peer := peer.Peer{
			PublicIP:  nodePublicIP,
			VPNIP:     nodeVPNIP,
			VPNPubKey: nodeVpnPubKey,
			Role:      role.Node,
		}
		if err := a.core.AddPeer(peer); err != nil {
			return err
		}
		// This can be omitted if we
		// 1. Use a gRPC HA balancer mechanism, which picks one active coordinator connection
		// (nodeUpdate loop causes problems, even if we specify the IP in the joinCluster RPC)
		if err := a.updateCoordinator(); err != nil {
			return err
		}
		if err := a.joinCluster(nodePublicIP); err != nil {
			return err
		}
	}

	// Manually trigger an update operation on all peers.
	// This may be expendable in the future, depending on whether it's acceptable that it takes
	// some seconds until the nodes get all peer data via their regular update requests.
	_, peers, err = a.core.GetPeers(0)
	if err != nil {
		return err
	}
	vpnIP, err := a.core.GetVPNIP()
	if err != nil {
		return err
	}
	for _, p := range peers {
		if p.Role == role.Node {
			if err := a.triggerNodeUpdate(p.PublicIP); err != nil {
				a.logger.Error("TriggerNodeUpdate failed", zap.Error(err))
			}
		}

		if p.Role == role.Coordinator && p.VPNIP != vpnIP {
			a.logger.Info("update coordinator", zap.String("coordinator vpnIP", p.VPNIP))
			if err := a.triggerCoordinatorUpdate(context.TODO(), p.PublicIP); err != nil {
				// no reason to panic here, we can recover
				a.logger.Error("triggerCoordinatorUpdate failed", zap.Error(err), zap.String("endpoint", p.PublicIP), zap.String("vpnip", p.VPNIP))
			}
		}
	}

	return nil
}

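// activateNode runs the bidirectional ActivateAsNode stream with a single node: it sends the
// initial activation request, receives the node's state disk UUID, answers with the derived
// disk encryption key, and returns the node's VPN public key.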
func (a *API) activateNode(nodePublicIP string, nodeVPNIP string, initialPeers []*pubproto.Peer, ownerID, clusterID []byte, sshUserKeys []*pubproto.SSHUserKey) ([]byte, error) {
	ctx, cancel := context.WithTimeout(context.Background(), deadlineDuration)
	defer cancel()

	conn, err := a.dialer.Dial(ctx, net.JoinHostPort(nodePublicIP, endpointAVPNPort))
	if err != nil {
		return nil, err
	}
	defer conn.Close()

	client := pubproto.NewAPIClient(conn)

	stream, err := client.ActivateAsNode(ctx)
	if err != nil {
		a.logger.Error("connecting to node for activation failed", zap.Error(err))
		return nil, err
	}

	/*
		coordinator -> initial request -> node
	*/
	if err := stream.Send(&pubproto.ActivateAsNodeRequest{
		Request: &pubproto.ActivateAsNodeRequest_InitialRequest{
			InitialRequest: &pubproto.ActivateAsNodeInitialRequest{
				NodeVpnIp:   nodeVPNIP,
				Peers:       initialPeers,
				OwnerId:     ownerID,
				ClusterId:   clusterID,
				SshUserKeys: sshUserKeys,
			},
		},
	}); err != nil {
		a.logger.Error("sending initial message to node for activation failed", zap.Error(err))
		return nil, err
	}

	/*
		coordinator <- state disk uuid <- node
	*/
	// wait for the message containing the node's disk UUID to send back the permanent encryption key
	message, err := stream.Recv()
	if err != nil {
		a.logger.Error("expected disk UUID message but no message received", zap.Error(err))
		return nil, err
	}
	diskUUID, ok := message.GetResponse().(*pubproto.ActivateAsNodeResponse_StateDiskUuid)
	if !ok {
		a.logger.Error("expected disk UUID message but got different message")
		return nil, errors.New("expected state disk UUID but got different message type")
	}
	diskKey, err := a.core.GetDataKey(ctx, diskUUID.StateDiskUuid, 32)
	if err != nil {
		a.logger.Error("failed to derive node's disk key")
		return nil, err
	}

	/*
		coordinator -> state disk key -> node
	*/
	// send back the state disk encryption key
	if err := stream.Send(&pubproto.ActivateAsNodeRequest{
		Request: &pubproto.ActivateAsNodeRequest_StateDiskKey{
			StateDiskKey: diskKey,
		},
	}); err != nil {
		a.logger.Error("sending state disk key to node on activation failed", zap.Error(err))
		return nil, err
	}

	/*
		coordinator <- VPN public key <- node
	*/
	// wait for the message containing the node's VPN pubkey
	message, err = stream.Recv()
	if err != nil {
		a.logger.Error("expected node VPN pubkey but no message received", zap.Error(err))
		return nil, err
	}
	vpnPubKey, ok := message.GetResponse().(*pubproto.ActivateAsNodeResponse_NodeVpnPubKey)
	if !ok {
		a.logger.Error("expected node VPN pubkey but got different message")
		return nil, errors.New("expected node VPN pub key but got different message type")
	}

	return vpnPubKey.NodeVpnPubKey, nil
}

// assemblePeerStruct combines all information of this peer into a peer struct.
func (a *API) assemblePeerStruct(vpnIP string, _ role.Role) (peer.Peer, error) {
	vpnPubKey, err := a.core.GetVPNPubKey()
	if err != nil {
		a.logger.Error("failed to get VPN pub key", zap.Error(err))
		return peer.Peer{}, err
	}
	publicIP, err := a.getPublicIPAddr()
	if err != nil {
		a.logger.Error("failed to get public IP", zap.Error(err))
		return peer.Peer{}, err
	}
	return peer.Peer{
		PublicIP:  publicIP,
		VPNIP:     vpnIP,
		VPNPubKey: vpnPubKey,
		Role:      role.Coordinator,
	}, err
}

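// newLogToCLIFunc wraps a send callback in a printf-style log function that streams
// progress messages to the connected CLI.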
func (a *API) newLogToCLIFunc(send func(string) error) logFunc {
	return func(format string, v ...any) {
		if err := send(fmt.Sprintf(format, v...)); err != nil {
			a.logger.Error("logging to CLI failed", zap.Error(err))
		}
	}
}

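// joinCluster instructs an activated node to join the Kubernetes cluster via this coordinator.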
func (a *API) joinCluster(nodePublicIP string) error {
	ctx, cancel := context.WithTimeout(context.Background(), deadlineDuration)
	defer cancel()

	vpnIP, err := a.core.GetVPNIP()
	if err != nil {
		return err
	}
	// We don't verify the peer certificate here, since JoinCluster triggers a connection over VPN
	// The target of the rpc needs to already be part of the VPN to process the request, meaning it is trusted
	conn, err := a.dialer.DialNoVerify(ctx, net.JoinHostPort(nodePublicIP, endpointAVPNPort))
	if err != nil {
		return err
	}
	defer conn.Close()

	client := pubproto.NewAPIClient(conn)
	_, err = client.JoinCluster(ctx, &pubproto.JoinClusterRequest{CoordinatorVpnIp: vpnIP})

	return err
}

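// updateCoordinator triggers a peer-data update on all other coordinators.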
func (a *API) updateCoordinator() error {
	_, peers, err := a.core.GetPeers(0)
	if err != nil {
		return err
	}
	vpnIP, err := a.core.GetVPNIP()
	if err != nil {
		return err
	}
	for _, p := range peers {
		if p.Role == role.Coordinator && p.VPNIP != vpnIP {
			a.logger.Info("update coordinator", zap.String("coordinator vpnIP", p.VPNIP))
			if err := a.triggerCoordinatorUpdate(context.TODO(), p.PublicIP); err != nil {
				a.logger.Error("triggerCoordinatorUpdate failed", zap.Error(err), zap.String("endpoint", p.PublicIP), zap.String("vpnip", p.VPNIP))
			}
		}
	}
	return nil
}

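// triggerNodeUpdate asks a node to fetch the latest peer data from the coordinator.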
func (a *API) triggerNodeUpdate(nodePublicIP string) error {
	ctx, cancel := context.WithTimeout(context.Background(), deadlineDuration)
	defer cancel()

	// We don't verify the peer certificate here, since TriggerNodeUpdate triggers a connection over VPN
	// The target of the rpc needs to already be part of the VPN to process the request, meaning it is trusted
	conn, err := a.dialer.DialNoVerify(ctx, net.JoinHostPort(nodePublicIP, endpointAVPNPort))
	if err != nil {
		return err
	}
	defer conn.Close()

	client := pubproto.NewAPIClient(conn)
	_, err = client.TriggerNodeUpdate(ctx, &pubproto.TriggerNodeUpdateRequest{})

	return err
}

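// logFunc is a printf-style logging function used to stream messages to the CLI.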
type logFunc func(format string, v ...any)
@ -1,591 +0,0 @@
package pubapi

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net"
	"sync"
	"testing"

	"github.com/edgelesssys/constellation/coordinator/logging"
	"github.com/edgelesssys/constellation/coordinator/peer"
	"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
	"github.com/edgelesssys/constellation/coordinator/role"
	"github.com/edgelesssys/constellation/coordinator/state"
	"github.com/edgelesssys/constellation/internal/atls"
	"github.com/edgelesssys/constellation/internal/attestation/vtpm"
	"github.com/edgelesssys/constellation/internal/deploy/ssh"
	"github.com/edgelesssys/constellation/internal/deploy/user"
	"github.com/edgelesssys/constellation/internal/grpc/atlscredentials"
	"github.com/edgelesssys/constellation/internal/grpc/dialer"
	"github.com/edgelesssys/constellation/internal/grpc/testdialer"
	"github.com/edgelesssys/constellation/internal/oid"
	kms "github.com/edgelesssys/constellation/kms/setup"
	"github.com/edgelesssys/constellation/state/keyservice/keyproto"
	"github.com/spf13/afero"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"go.uber.org/zap/zaptest"
	"google.golang.org/grpc"
	grpcpeer "google.golang.org/grpc/peer"
)

func TestActivateAsCoordinator(t *testing.T) {
	someErr := errors.New("failed")
	coordinatorPubKey := []byte{6, 7, 8}
	testNode1 := newStubPeer("192.0.2.11", []byte{1, 2, 3})
	testNode2 := newStubPeer("192.0.2.12", []byte{2, 3, 4})
	testNode3 := newStubPeer("192.0.2.13", []byte{3, 4, 5})
	wantNode1 := peer.Peer{PublicIP: "192.0.2.11", VPNIP: "10.118.0.11", VPNPubKey: []byte{1, 2, 3}, Role: role.Node}
	wantNode2 := peer.Peer{PublicIP: "192.0.2.12", VPNIP: "10.118.0.12", VPNPubKey: []byte{2, 3, 4}, Role: role.Node}
	wantNode3 := peer.Peer{PublicIP: "192.0.2.13", VPNIP: "10.118.0.13", VPNPubKey: []byte{3, 4, 5}, Role: role.Node}
	wantCoord := peer.Peer{PublicIP: "192.0.2.1", VPNIP: "10.118.0.1", VPNPubKey: coordinatorPubKey, Role: role.Coordinator}
	adminPeer := peer.Peer{VPNPubKey: []byte{7, 8, 9}, Role: role.Admin}
	sshUser1 := &ssh.UserKey{
		Username:  "test-user-1",
		PublicKey: "ssh-rsa abcdefg",
	}
	sshUser2 := &ssh.UserKey{
		Username:  "test-user-2",
		PublicKey: "ssh-ed25519 hijklmn",
	}

	testCases := map[string]struct {
		nodes                      []*stubPeer
		state                      state.State
		switchToPersistentStoreErr error
		wantErr                    bool
		wantPeers                  []peer.Peer
		wantState                  state.State
		adminVPNIP                 string
		sshKeys                    []*ssh.UserKey
	}{
		"0 nodes": {
			state:      state.AcceptingInit,
			wantPeers:  []peer.Peer{wantCoord},
			wantState:  state.ActivatingNodes,
			adminVPNIP: "10.118.0.11",
		},
		"1 node": {
			nodes:      []*stubPeer{testNode1},
			state:      state.AcceptingInit,
			wantPeers:  []peer.Peer{wantCoord, wantNode1},
			wantState:  state.ActivatingNodes,
			adminVPNIP: "10.118.0.12",
		},
		"2 nodes": {
			nodes:      []*stubPeer{testNode1, testNode2},
			state:      state.AcceptingInit,
			wantPeers:  []peer.Peer{wantCoord, wantNode1, wantNode2},
			wantState:  state.ActivatingNodes,
			adminVPNIP: "10.118.0.13",
		},
		"3 nodes": {
			nodes:      []*stubPeer{testNode1, testNode2, testNode3},
			state:      state.AcceptingInit,
			wantPeers:  []peer.Peer{wantCoord, wantNode1, wantNode2, wantNode3},
			wantState:  state.ActivatingNodes,
			adminVPNIP: "10.118.0.14",
		},
		"coordinator with SSH users": {
			state:      state.AcceptingInit,
			wantPeers:  []peer.Peer{wantCoord},
			wantState:  state.ActivatingNodes,
			adminVPNIP: "10.118.0.11",
			sshKeys:    []*ssh.UserKey{sshUser1, sshUser2},
		},
		"already activated": {
			nodes:     []*stubPeer{testNode1},
			state:     state.ActivatingNodes,
			wantErr:   true,
			wantState: state.ActivatingNodes,
		},
		"wrong peer kind": {
			nodes:     []*stubPeer{testNode1},
			state:     state.IsNode,
			wantErr:   true,
			wantState: state.IsNode,
		},
		"node activation error": {
			nodes:     []*stubPeer{testNode1, {activateErr: someErr}, testNode3},
			state:     state.AcceptingInit,
			wantErr:   true,
			wantState: state.Failed,
		},
		"node join error": {
			nodes:     []*stubPeer{testNode1, {joinErr: someErr}, testNode3},
			state:     state.AcceptingInit,
			wantErr:   true,
			wantState: state.Failed,
		},
		"SwitchToPersistentStore error": {
			nodes:                      []*stubPeer{testNode1},
			state:                      state.AcceptingInit,
			switchToPersistentStoreErr: someErr,
			wantErr:                    true,
			wantState:                  state.Failed,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)
			require := require.New(t)

			autoscalingNodeGroups := []string{"ang1", "ang2"}
			keyEncryptionKeyID := "constellation"
			fs := afero.NewMemMapFs()
			core := &fakeCore{
				state:                      tc.state,
				vpnPubKey:                  coordinatorPubKey,
				switchToPersistentStoreErr: tc.switchToPersistentStoreErr,
				kubeconfig:                 []byte("kubeconfig"),
				ownerID:                    []byte("ownerID"),
				clusterID:                  []byte("clusterID"),
				linuxUserManager:           user.NewLinuxUserManagerFake(fs),
			}

			netDialer := testdialer.NewBufconnDialer()
			dialer := dialer.New(nil, fakeValidator{}, netDialer)

			getPublicIPAddr := func() (string, error) {
				return "192.0.2.1", nil
			}

			api := New(zaptest.NewLogger(t), &logging.NopLogger{}, core, dialer, stubVPNAPIServer{}, getPublicIPAddr, nil)
			defer api.Close()

			// spawn nodes
			var nodePublicIPs []string
			var wg sync.WaitGroup
			for _, n := range tc.nodes {
				nodePublicIPs = append(nodePublicIPs, n.peer.PublicIP)
				server := n.newServer()
				wg.Add(1)
				go func(endpoint string) {
					listener := netDialer.GetListener(endpoint)
					wg.Done()
					_ = server.Serve(listener)
				}(net.JoinHostPort(n.peer.PublicIP, endpointAVPNPort))
				defer server.GracefulStop()
			}
			wg.Wait()

			stream := &stubActivateAsCoordinatorServer{}
			err := api.ActivateAsCoordinator(&pubproto.ActivateAsCoordinatorRequest{
				AdminVpnPubKey:        adminPeer.VPNPubKey,
				NodePublicIps:         nodePublicIPs,
				AutoscalingNodeGroups: autoscalingNodeGroups,
				MasterSecret:          []byte("Constellation"),
				KeyEncryptionKeyId:    keyEncryptionKeyID,
				UseExistingKek:        false,
				KmsUri:                kms.ClusterKMSURI,
				StorageUri:            kms.NoStoreURI,
				SshUserKeys:           ssh.ToProtoSlice(tc.sshKeys),
			}, stream)

			assert.Equal(tc.wantState, core.state)

			if tc.wantErr {
				assert.Error(err)
				return
			}
			require.NoError(err)

			// Coordinator streams logs and admin conf
			require.Greater(len(stream.sent), len(tc.nodes))
			for i := 0; i < len(stream.sent)-1; i++ {
				assert.NotEmpty(stream.sent[i].GetLog().Message)
			}
			adminConfig := stream.sent[len(stream.sent)-1].GetAdminConfig()
			assert.Equal(tc.adminVPNIP, adminConfig.AdminVpnIp)
			assert.Equal(coordinatorPubKey, adminConfig.CoordinatorVpnPubKey)
			assert.Equal(core.kubeconfig, adminConfig.Kubeconfig)
			assert.Equal(core.ownerID, adminConfig.OwnerId)
			assert.Equal(core.clusterID, adminConfig.ClusterId)

			// Core is updated
			vpnIP, err := core.GetVPNIP()
			require.NoError(err)
			assert.Equal(vpnIP, core.vpnIP)
			// construct full list of expected peers
			adminPeer.VPNIP = tc.adminVPNIP
			assert.Equal(append(tc.wantPeers, adminPeer), core.peers)
			assert.Equal(autoscalingNodeGroups, core.autoscalingNodeGroups)
			assert.Equal(keyEncryptionKeyID, core.kekID)
			assert.Equal([]role.Role{role.Coordinator}, core.persistNodeStateRoles)

			// Test SSH user & key creation. Both cases: "supposed to add" and "not supposed to add"
			// This slightly differs from a real environment (e.g. missing /var/home) but should be fine in the stub context with a virtual file system
			if tc.sshKeys != nil {
				passwd := user.Passwd{}
				entries, err := passwd.Parse(fs)
				require.NoError(err)
				for _, singleEntry := range entries {
					username := singleEntry.Gecos
					_, err := fs.Stat(fmt.Sprintf("/var/home/%s/.ssh/authorized_keys.d/constellation-ssh-keys", username))
					assert.NoError(err)
				}
			} else {
				passwd := user.Passwd{}
				_, err := passwd.Parse(fs)
				assert.EqualError(err, "open /etc/passwd: file does not exist")
				_, err = fs.Stat("/var/home")
				assert.EqualError(err, "open /var/home: file does not exist")
			}
		})
	}
}

func TestActivateAdditionalNodes(t *testing.T) {
	someErr := errors.New("failed")
	testNode1 := newStubPeer("192.0.2.11", []byte{1, 2, 3})
	testNode2 := newStubPeer("192.0.2.12", []byte{2, 3, 4})
	testNode3 := newStubPeer("192.0.2.13", []byte{3, 4, 5})
	wantNode1 := peer.Peer{PublicIP: "192.0.2.11", VPNIP: "10.118.0.11", VPNPubKey: []byte{1, 2, 3}, Role: role.Node}
	wantNode2 := peer.Peer{PublicIP: "192.0.2.12", VPNIP: "10.118.0.12", VPNPubKey: []byte{2, 3, 4}, Role: role.Node}
	wantNode3 := peer.Peer{PublicIP: "192.0.2.13", VPNIP: "10.118.0.13", VPNPubKey: []byte{3, 4, 5}, Role: role.Node}

	testCases := map[string]struct {
		nodes     []*stubPeer
		state     state.State
		wantErr   bool
		wantPeers []peer.Peer
	}{
		"0 nodes": {
			state: state.ActivatingNodes,
		},
		"1 node": {
			nodes:     []*stubPeer{testNode1},
			state:     state.ActivatingNodes,
			wantPeers: []peer.Peer{wantNode1},
		},
		"2 nodes": {
			nodes:     []*stubPeer{testNode1, testNode2},
			state:     state.ActivatingNodes,
			wantPeers: []peer.Peer{wantNode1, wantNode2},
		},
		"3 nodes": {
			nodes:     []*stubPeer{testNode1, testNode2, testNode3},
			state:     state.ActivatingNodes,
			wantPeers: []peer.Peer{wantNode1, wantNode2, wantNode3},
		},
		"uninitialized": {
			nodes:   []*stubPeer{testNode1},
			wantErr: true,
		},
		"wrong peer kind": {
			nodes:   []*stubPeer{testNode1},
			state:   state.IsNode,
			wantErr: true,
		},
		"node activation error": {
			nodes:   []*stubPeer{testNode1, {activateErr: someErr}, testNode3},
			state:   state.ActivatingNodes,
			wantErr: true,
		},
		"node join error": {
			nodes:   []*stubPeer{testNode1, {joinErr: someErr}, testNode3},
			state:   state.ActivatingNodes,
			wantErr: true,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)
			require := require.New(t)

			core := &fakeCore{state: tc.state}
			netDialer := testdialer.NewBufconnDialer()
			dialer := dialer.New(nil, fakeValidator{}, netDialer)

			getPublicIPAddr := func() (string, error) {
				return "192.0.2.1", nil
			}

			api := New(zaptest.NewLogger(t), &logging.NopLogger{}, core, dialer, nil, getPublicIPAddr, nil)
			defer api.Close()
			// spawn nodes
			var nodePublicIPs []string
			var wg sync.WaitGroup
			for _, n := range tc.nodes {
				nodePublicIPs = append(nodePublicIPs, n.peer.PublicIP)
				server := n.newServer()
				wg.Add(1)
				go func(endpoint string) {
					listener := netDialer.GetListener(endpoint)
					wg.Done()
					_ = server.Serve(listener)
				}(net.JoinHostPort(n.peer.PublicIP, endpointAVPNPort))
				defer server.GracefulStop()
			}
			wg.Wait()
			// since we are not activating the coordinator, initialize the store with IPs
			require.NoError(core.InitializeStoreIPs())
			stream := &stubActivateAdditionalNodesServer{}
			err := api.ActivateAdditionalNodes(&pubproto.ActivateAdditionalNodesRequest{NodePublicIps: nodePublicIPs}, stream)
			if tc.wantErr {
				assert.Error(err)
				return
			}
			require.NoError(err)

			// Coordinator streams logs
			require.Len(stream.sent, len(tc.nodes)+1)
			for _, s := range stream.sent {
				assert.NotEmpty(s.GetLog().Message)
			}

			// Core is updated
			assert.Equal(tc.wantPeers, core.peers)
		})
	}
}

func TestAssemblePeerStruct(t *testing.T) {
	assert := assert.New(t)
	require := require.New(t)

	getPublicIPAddr := func() (string, error) {
		return "192.0.2.1", nil
	}

	vpnPubKey := []byte{2, 3, 4}
	core := &fakeCore{vpnPubKey: vpnPubKey}
	api := New(zaptest.NewLogger(t), &logging.NopLogger{}, core, nil, nil, getPublicIPAddr, nil)
	defer api.Close()

	vpnIP, err := core.GetVPNIP()
	require.NoError(err)
	want := peer.Peer{
		PublicIP:  "192.0.2.1",
		VPNIP:     vpnIP,
		VPNPubKey: vpnPubKey,
		Role:      role.Coordinator,
	}

	actual, err := api.assemblePeerStruct(vpnIP, role.Coordinator)
	require.NoError(err)
	assert.Equal(want, actual)
}

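// stubPeer implements the public API server of a node for testing the coordinator's activation flow.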
type stubPeer struct {
	peer                   peer.Peer
	activateAsNodeMessages []*pubproto.ActivateAsNodeResponse
	activateAsNodeReceive  int
	activateErr            error
	joinErr                error
	getPubKeyErr           error
	pubproto.UnimplementedAPIServer
}

func newStubPeer(publicIP string, vpnPubKey []byte) *stubPeer {
	return &stubPeer{
		peer: peer.Peer{PublicIP: publicIP, VPNPubKey: vpnPubKey},
		activateAsNodeMessages: []*pubproto.ActivateAsNodeResponse{
			{Response: &pubproto.ActivateAsNodeResponse_StateDiskUuid{StateDiskUuid: "state-disk-uuid"}},
			{Response: &pubproto.ActivateAsNodeResponse_NodeVpnPubKey{NodeVpnPubKey: vpnPubKey}},
		},
		activateAsNodeReceive: 2,
	}
}

func (n *stubPeer) ActivateAsNode(stream pubproto.API_ActivateAsNodeServer) error {
	for _, message := range n.activateAsNodeMessages {
		err := stream.Send(message)
		if err != nil {
			return err
		}
	}
	for i := 0; i < n.activateAsNodeReceive; i++ {
		_, err := stream.Recv()
		if err != nil {
			return err
		}
	}
	if _, err := stream.Recv(); err != io.EOF {
		return err
	}

	return n.activateErr
}

func (n *stubPeer) ActivateAsAdditionalCoordinator(ctx context.Context, in *pubproto.ActivateAsAdditionalCoordinatorRequest) (*pubproto.ActivateAsAdditionalCoordinatorResponse, error) {
	return &pubproto.ActivateAsAdditionalCoordinatorResponse{}, n.activateErr
}

func (*stubPeer) TriggerNodeUpdate(ctx context.Context, in *pubproto.TriggerNodeUpdateRequest) (*pubproto.TriggerNodeUpdateResponse, error) {
	return &pubproto.TriggerNodeUpdateResponse{}, nil
}

func (n *stubPeer) JoinCluster(ctx context.Context, in *pubproto.JoinClusterRequest) (*pubproto.JoinClusterResponse, error) {
	return &pubproto.JoinClusterResponse{}, n.joinErr
}

func (n *stubPeer) GetPeerVPNPublicKey(ctx context.Context, in *pubproto.GetPeerVPNPublicKeyRequest) (*pubproto.GetPeerVPNPublicKeyResponse, error) {
	return &pubproto.GetPeerVPNPublicKeyResponse{CoordinatorPubKey: n.peer.VPNPubKey}, n.getPubKeyErr
}

func (n *stubPeer) newServer() *grpc.Server {
	creds := atlscredentials.New(fakeIssuer{}, nil)
	server := grpc.NewServer(grpc.Creds(creds))
	pubproto.RegisterAPIServer(server, n)
	return server
}

type stubVPNAPIServer struct{}

func (stubVPNAPIServer) Listen(endpoint string) error {
	return nil
}

func (stubVPNAPIServer) Serve() error {
	return nil
}

func (stubVPNAPIServer) Close() {
}

type fakeIssuer struct {
	oid.Dummy
}

func (fakeIssuer) Issue(userData []byte, nonce []byte) ([]byte, error) {
	return userData, nil
}

type fakeValidator struct {
	oid.Dummy
}

func (fakeValidator) Validate(attdoc []byte, nonce []byte) ([]byte, error) {
	return attdoc, nil
}

type stubActivateAsCoordinatorServer struct {
	grpc.ServerStream
	sent []*pubproto.ActivateAsCoordinatorResponse
}

func (s *stubActivateAsCoordinatorServer) Send(req *pubproto.ActivateAsCoordinatorResponse) error {
	s.sent = append(s.sent, req)
	return nil
}

type stubActivateAdditionalNodesServer struct {
	grpc.ServerStream
	sent []*pubproto.ActivateAdditionalNodesResponse
}

func (s *stubActivateAdditionalNodesServer) Send(req *pubproto.ActivateAdditionalNodesResponse) error {
	s.sent = append(s.sent, req)
	return nil
}

func TestRequestStateDiskKey(t *testing.T) {
	defaultKey := []byte("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA")
	someErr := errors.New("error")
	testCases := map[string]struct {
		state         state.State
		dataKey       []byte
		getDataKeyErr error
		pushKeyErr    error
		wantErr       bool
	}{
		"success": {
			state:   state.ActivatingNodes,
			dataKey: defaultKey,
		},
		"Coordinator in wrong state": {
			state:   state.IsNode,
			dataKey: defaultKey,
			wantErr: true,
		},
		"GetDataKey fails": {
			state:         state.ActivatingNodes,
			dataKey:       defaultKey,
			getDataKeyErr: someErr,
			wantErr:       true,
		},
		"key pushing fails": {
			state:      state.ActivatingNodes,
			dataKey:    defaultKey,
			pushKeyErr: someErr,
			wantErr:    true,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)
			require := require.New(t)

			issuer := atls.NewFakeIssuer(oid.Dummy{})

			stateDiskServer := &stubStateDiskServer{pushKeyErr: tc.pushKeyErr}

			// we cannot use a bufconn here, since we rely on grpcpeer.FromContext() to connect to the caller
			listener, err := net.Listen("tcp", ":")
			require.NoError(err)
			defer listener.Close()

			creds := atlscredentials.New(issuer, nil)
			s := grpc.NewServer(grpc.Creds(creds))
			keyproto.RegisterAPIServer(s, stateDiskServer)
			defer s.GracefulStop()
			go s.Serve(listener)

			ctx := grpcpeer.NewContext(context.Background(), &grpcpeer.Peer{Addr: listener.Addr()})
			getPeerFromContext := func(ctx context.Context) (string, error) {
				peer, ok := grpcpeer.FromContext(ctx)
				if !ok {
					return "", errors.New("unable to get peer from context")
				}
				return peer.Addr.String(), nil
			}

			core := &fakeCore{
				state:         tc.state,
				dataKey:       tc.dataKey,
				getDataKeyErr: tc.getDataKeyErr,
			}

			api := New(zaptest.NewLogger(t), &logging.NopLogger{}, core, dialer.New(nil, dummyValidator{}, &net.Dialer{}), nil, nil, getPeerFromContext)

			_, err = api.RequestStateDiskKey(ctx, &pubproto.RequestStateDiskKeyRequest{})
			if tc.wantErr {
				assert.Error(err)
			} else {
				assert.NoError(err)
				assert.Equal(tc.dataKey, stateDiskServer.receivedRequest.StateDiskKey)
			}
		})
	}
}

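// dummyValidator accepts any attestation document and returns the user data it carries.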
type dummyValidator struct {
	oid.Dummy
}

func (d dummyValidator) Validate(attdoc []byte, nonce []byte) ([]byte, error) {
	var attestation vtpm.AttestationDocument
	if err := json.Unmarshal(attdoc, &attestation); err != nil {
		return nil, err
	}
	return attestation.UserData, nil
}

type stubStateDiskServer struct {
	receivedRequest *keyproto.PushStateDiskKeyRequest
	pushKeyErr      error
	keyproto.UnimplementedAPIServer
}

func (s *stubStateDiskServer) PushStateDiskKey(ctx context.Context, in *keyproto.PushStateDiskKeyRequest) (*keyproto.PushStateDiskKeyResponse, error) {
	s.receivedRequest = in
	return &keyproto.PushStateDiskKeyResponse{}, s.pushKeyErr
}
@ -1,48 +0,0 @@
package pubapi

import (
	"context"

	"github.com/edgelesssys/constellation/coordinator/peer"
	"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
	"github.com/edgelesssys/constellation/coordinator/role"
	"github.com/edgelesssys/constellation/coordinator/state"
	attestationtypes "github.com/edgelesssys/constellation/internal/attestation/types"
	"github.com/edgelesssys/constellation/internal/deploy/ssh"
	kms "github.com/edgelesssys/constellation/kms/setup"
	kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
)

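// Core is the functionality the Coordinator's public API expects from the underlying core layer.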
type Core interface {
	GetVPNPubKey() ([]byte, error)
	SetVPNIP(string) error
	GetVPNIP() (string, error)
	InitializeStoreIPs() error
	GetNextNodeIP() (string, error)
	GetNextCoordinatorIP() (string, error)
	SwitchToPersistentStore() error
	GetIDs(masterSecret []byte) (ownerID []byte, clusterID []byte, err error)
	PersistNodeState(role role.Role, vpnIP string, ownerID []byte, clusterID []byte) error
	SetUpKMS(ctx context.Context, storageURI, kmsURI, kekID string, useExisting bool) error
	GetKMSInfo() (kms.KMSInformation, error)
	GetDataKey(ctx context.Context, keyID string, length int) ([]byte, error)
	GetDiskUUID() (string, error)
	UpdateDiskPassphrase(passphrase string) error

	GetState() state.State
	RequireState(...state.State) error
	AdvanceState(newState state.State, ownerID, clusterID []byte) error

	GetPeers(resourceVersion int) (int, []peer.Peer, error)
	AddPeer(peer.Peer) error
	AddPeerToStore(peer.Peer) error
	AddPeerToVPN(peer.Peer) error
	UpdatePeers([]peer.Peer) error

	CreateSSHUsers([]ssh.UserKey) error

	InitCluster(
		ctx context.Context, autoscalingNodeGroups []string, cloudServiceAccountURI string, id attestationtypes.ID, masterSecret []byte, sshUserKeys []*pubproto.SSHUserKey,
	) ([]byte, error)
	JoinCluster(ctx context.Context, joinToken *kubeadm.BootstrapTokenDiscovery, certificateKey string, role role.Role) error
}
@ -1,178 +0,0 @@
package pubapi

import (
	"context"
	"errors"
	"net/netip"

	"github.com/edgelesssys/constellation/coordinator/peer"
	"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
	"github.com/edgelesssys/constellation/coordinator/role"
	"github.com/edgelesssys/constellation/coordinator/state"
	attestationtypes "github.com/edgelesssys/constellation/internal/attestation/types"
	"github.com/edgelesssys/constellation/internal/deploy/ssh"
	"github.com/edgelesssys/constellation/internal/deploy/user"
	"github.com/edgelesssys/constellation/internal/logger"
	kms "github.com/edgelesssys/constellation/kms/setup"
	"go.uber.org/zap/zapcore"
	kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
)

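// fakeCore is an in-memory Core implementation that records peer and state changes for test assertions.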
type fakeCore struct {
	vpnPubKey                  []byte
	getvpnPubKeyErr            error
	vpnIP                      string
	setVPNIPErr                error
	nextNodeIP                 netip.Addr
	nextCoordinatorIP          netip.Addr
	switchToPersistentStoreErr error
	state                      state.State
	ownerID                    []byte
	clusterID                  []byte
	peers                      []peer.Peer
	updatedPeers               [][]peer.Peer
	kubeconfig                 []byte
	autoscalingNodeGroups      []string
	joinArgs                   []kubeadm.BootstrapTokenDiscovery
	joinClusterErr             error
	UpdatePeersErr             error
	GetPeersErr                error
	persistNodeStateRoles      []role.Role
	persistNodeStateErr        error
	kekID                      string
	dataKey                    []byte
	getDataKeyErr              error
	linuxUserManager           user.LinuxUserManager
}

func (c *fakeCore) GetVPNPubKey() ([]byte, error) {
	return c.vpnPubKey, c.getvpnPubKeyErr
}

func (c *fakeCore) SetVPNIP(ip string) error {
	if len(c.ownerID) == 0 || len(c.clusterID) == 0 {
		return errors.New("SetVPNIP called before IDs were set")
	}
	c.vpnIP = ip
	return c.setVPNIPErr
}

func (c *fakeCore) InitializeStoreIPs() error {
	c.nextCoordinatorIP = netip.AddrFrom4([4]byte{10, 118, 0, 1})
	c.nextNodeIP = netip.AddrFrom4([4]byte{10, 118, 0, 11})
	return nil
}

func (c *fakeCore) GetVPNIP() (string, error) {
	return c.vpnIP, nil
}

func (c *fakeCore) GetNextNodeIP() (string, error) {
	ip := c.nextNodeIP.String()
	c.nextNodeIP = c.nextNodeIP.Next()
	return ip, nil
}

func (c *fakeCore) GetNextCoordinatorIP() (string, error) {
	ip := c.nextCoordinatorIP.String()
	c.nextCoordinatorIP = c.nextCoordinatorIP.Next()
	return ip, nil
}

func (c *fakeCore) SwitchToPersistentStore() error {
	return c.switchToPersistentStoreErr
}

func (c *fakeCore) GetIDs(masterSecret []byte) (ownerID []byte, clusterID []byte, err error) {
	return c.ownerID, c.clusterID, nil
}

func (c *fakeCore) GetState() state.State {
	return c.state.Get()
}

func (c *fakeCore) RequireState(states ...state.State) error {
	return c.state.Require(states...)
}

func (c *fakeCore) AdvanceState(newState state.State, ownerID, clusterID []byte) error {
	c.ownerID = ownerID
	c.clusterID = clusterID
	c.state.Advance(newState)
	return nil
}

func (c *fakeCore) GetPeers(resourceVersion int) (int, []peer.Peer, error) {
	return 1, c.peers, c.GetPeersErr
}

func (c *fakeCore) AddPeer(peer peer.Peer) error {
	c.peers = append(c.peers, peer)
	return nil
}

func (c *fakeCore) AddPeerToStore(peer peer.Peer) error {
	c.peers = append(c.peers, peer)
	return nil
}

func (c *fakeCore) AddPeerToVPN(peer peer.Peer) error {
	c.peers = append(c.peers, peer)
	return nil
}

func (c *fakeCore) UpdatePeers(peers []peer.Peer) error {
	c.updatedPeers = append(c.updatedPeers, peers)
	return c.UpdatePeersErr
}

func (c *fakeCore) InitCluster(
	ctx context.Context, autoscalingNodeGroups []string, cloudServiceAccountURI string, id attestationtypes.ID, masterSecret []byte, sshUsers []*pubproto.SSHUserKey,
) ([]byte, error) {
	c.autoscalingNodeGroups = autoscalingNodeGroups
	return c.kubeconfig, nil
}

func (c *fakeCore) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTokenDiscovery, _ string, _ role.Role) error {
	c.joinArgs = append(c.joinArgs, *args)
	return c.joinClusterErr
}

func (c *fakeCore) PersistNodeState(role role.Role, vpnIP string, ownerID []byte, clusterID []byte) error {
	c.persistNodeStateRoles = append(c.persistNodeStateRoles, role)
	return c.persistNodeStateErr
}

func (c *fakeCore) SetUpKMS(ctx context.Context, storageURI, kmsURI, kekID string, useExisting bool) error {
	c.kekID = kekID
	return nil
}

func (c *fakeCore) GetKMSInfo() (kms.KMSInformation, error) {
	return kms.KMSInformation{}, nil
}

func (c *fakeCore) GetDataKey(ctx context.Context, keyID string, length int) ([]byte, error) {
	return c.dataKey, c.getDataKeyErr
}

func (c *fakeCore) GetDiskUUID() (string, error) {
	return "fake-disk-uuid", nil
}

func (c *fakeCore) UpdateDiskPassphrase(passphrase string) error {
	return nil
}

func (c *fakeCore) CreateSSHUsers(sshUserKeys []ssh.UserKey) error {
	sshAccess := ssh.NewAccess(logger.New(logger.PlainLog, zapcore.DebugLevel), c.linuxUserManager)
	ctx := context.Background()

	for _, pair := range sshUserKeys {
		if err := sshAccess.DeployAuthorizedKey(ctx, pair); err != nil {
			return err
		}
	}

	return nil
}
@ -1,312 +0,0 @@
package pubapi

import (
	"context"
	"fmt"
	"net"

	"github.com/edgelesssys/constellation/coordinator/peer"
	"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
	"github.com/edgelesssys/constellation/coordinator/role"
	"github.com/edgelesssys/constellation/coordinator/state"
	"github.com/edgelesssys/constellation/coordinator/vpnapi/vpnproto"
	"github.com/edgelesssys/constellation/internal/deploy/ssh"
	"go.uber.org/zap"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
)

// ActivateAsAdditionalCoordinator is the RPC call to activate subsequent coordinators.
func (a *API) ActivateAsAdditionalCoordinator(ctx context.Context, in *pubproto.ActivateAsAdditionalCoordinatorRequest) (out *pubproto.ActivateAsAdditionalCoordinatorResponse, reterr error) {
	_, cancel := context.WithTimeout(ctx, deadlineDuration)
	defer cancel()
	a.mut.Lock()
	defer a.mut.Unlock()

	if err := a.core.RequireState(state.AcceptingInit); err != nil {
		return nil, status.Errorf(codes.FailedPrecondition, "node is not in required state: %v", err)
	}
	// Some of the following actions can't be reverted (yet). If there's an
	// error, we may be in a weird state. Thus, mark this peer as failed.
	defer func() {
		if reterr != nil {
			_ = a.core.AdvanceState(state.Failed, nil, nil)
		}
	}()

	// AdvanceState MUST be called before any other functions that are not sanity checks or otherwise required.
	// This ensures the node is marked as initialized before it enters a state that allows code execution.
	// Any new additions to ActivateAsAdditionalCoordinator MUST come after.
	if err := a.core.AdvanceState(state.ActivatingNodes, in.OwnerId, in.ClusterId); err != nil {
		return nil, status.Errorf(codes.Internal, "advance state to ActivatingNodes: %v", err)
	}

	// Set up SSH users on the subsequent coordinator, if defined
	if len(in.SshUserKeys) != 0 {
		sshUserKeys := ssh.FromProtoSlice(in.SshUserKeys)
		if err := a.core.CreateSSHUsers(sshUserKeys); err != nil {
			return nil, status.Errorf(codes.Internal, "creating SSH users on additional coordinators: %v", err)
		}
	}
	// add one coordinator to the VPN
	if err := a.core.SetVPNIP(in.AssignedVpnIp); err != nil {
		return nil, status.Errorf(codes.Internal, "set vpn IP address: %v", err)
	}

	if err := a.core.AddPeerToVPN(peer.FromPubProto([]*pubproto.Peer{in.ActivatingCoordinatorData})[0]); err != nil {
		return nil, status.Errorf(codes.Internal, "adding initial peers to vpn: %v", err)
	}

	// run the VPN-API server
	if err := a.StartVPNAPIServer(in.AssignedVpnIp); err != nil {
		return nil, status.Errorf(codes.Internal, "start vpnAPIServer: %v", err)
	}

	a.logger.Info("retrieving k8s join information")
	joinArgs, certKey, err := a.getk8SCoordinatorJoinArgs(ctx, in.ActivatingCoordinatorData.VpnIp, vpnAPIPort)
	if err != nil {
		return nil, status.Errorf(codes.Internal, "error in getk8sJoinArgs: %v", err)
	}
	// Before we join the cluster we need to be able to communicate with ALL other control-planes
	err = a.core.UpdatePeers(peer.FromPubProto(in.Peers))
	if err != nil {
		return nil, status.Errorf(codes.Internal, "add peers to vpn: %v", err)
	}
	a.logger.Info("about to join the k8s cluster")
	err = a.core.JoinCluster(context.TODO(), joinArgs, certKey, role.Coordinator)
	if err != nil {
		return nil, status.Errorf(codes.Internal, "%v", err)
	}

	// ATTENTION: STORE HAS TO BE EMPTY (NO OVERLAPPING KEYS) WHEN THIS FUNCTION IS CALLED
	if err := a.core.SwitchToPersistentStore(); err != nil {
		return nil, status.Errorf(codes.Internal, "switch to persistent store: %v", err)
	}
	a.logger.Info("Transition to persistent store successful")

	kmsData, err := a.core.GetKMSInfo()
	if err != nil {
		return nil, status.Errorf(codes.Internal, "%v", err)
	}
	if err := a.core.SetUpKMS(ctx, kmsData.StorageUri, kmsData.KmsUri, kmsData.KeyEncryptionKeyID, false); err != nil {
		return nil, status.Errorf(codes.Internal, "%v", err)
	}

	// persist node state on disk
	if err := a.core.PersistNodeState(role.Coordinator, in.AssignedVpnIp, in.OwnerId, in.ClusterId); err != nil {
		return nil, status.Errorf(codes.Internal, "persist node state: %v", err)
	}
	diskUUID, err := a.core.GetDiskUUID()
	if err != nil {
		return nil, status.Errorf(codes.Internal, "getting disk uuid: %v", err)
	}
	diskKey, err := a.core.GetDataKey(ctx, diskUUID, 32)
	if err != nil {
		return nil, status.Errorf(codes.Internal, "getting disk key: %v", err)
	}
	if err := a.core.UpdateDiskPassphrase(string(diskKey)); err != nil {
		return nil, status.Errorf(codes.Internal, "updating disk key: %v", err)
	}

	// regularly get (peer) updates from etcd
	// start the update before the manual peer add to avoid race conditions when multiple coordinators are activating nodes

	thisPeer, err := a.assemblePeerStruct(in.AssignedVpnIp, role.Coordinator)
	if err != nil {
		return nil, status.Errorf(codes.Internal, "assembling coordinator peer struct: %v", err)
	}
	if err := a.core.AddPeerToStore(thisPeer); err != nil {
		return nil, status.Errorf(codes.Internal, "adding new coordinator to persistent store: %v", err)
	}

	resourceVersion, peers, err := a.core.GetPeers(0)
	if err != nil {
		return nil, status.Errorf(codes.Internal, "get peers from store: %v", err)
	}
	a.resourceVersion = resourceVersion

	err = a.core.UpdatePeers(peers)
	if err != nil {
		return nil, status.Errorf(codes.Internal, "synchronizing peers with vpn state: %v", err)
	}
	// Manually trigger an update operation on all peers.
	// This may be expendable in the future, depending on whether it's acceptable that it takes
	// some seconds until the nodes get all peer data via their regular update requests.
	_, peers, err = a.core.GetPeers(0)
	if err != nil {
		return nil, status.Errorf(codes.Internal, "get peers from store: %v", err)
	}
	for _, p := range peers {
		if p.Role == role.Node {
			if err := a.triggerNodeUpdate(p.PublicIP); err != nil {
				a.logger.Error("triggerNodeUpdate failed", zap.Error(err), zap.String("endpoint", p.PublicIP), zap.String("vpnip", p.VPNIP))
			}
		}
		if p.Role == role.Coordinator && p.VPNIP != thisPeer.VPNIP {
			if err := a.triggerCoordinatorUpdate(context.TODO(), p.PublicIP); err != nil {
				a.logger.Error("triggerCoordinatorUpdate failed", zap.Error(err), zap.String("endpoint", p.PublicIP), zap.String("vpnip", p.VPNIP))
			}
		}
	}

	return &pubproto.ActivateAsAdditionalCoordinatorResponse{}, nil
}

func (a *API) ActivateAdditionalCoordinator(ctx context.Context, in *pubproto.ActivateAdditionalCoordinatorRequest) (*pubproto.ActivateAdditionalCoordinatorResponse, error) {
	err := a.activateCoordinator(ctx, in.CoordinatorPublicIp, in.SshUserKeys)
	if err != nil {
		a.logger.Error("coordinator activation failed", zap.Error(err))
		return nil, status.Errorf(codes.Internal, "activate new coordinator: %v", err)
	}
	return &pubproto.ActivateAdditionalCoordinatorResponse{}, nil
}

func (a *API) activateCoordinators(logToCLI logFunc, coordinatorPublicIPs []string, sshUserKeys []*pubproto.SSHUserKey) error {
	// Activate all coordinators.
	for num, coordinatorPublicIP := range coordinatorPublicIPs {
		logToCLI("Activating control-plane node %3d out of %3d ...", num+2, len(coordinatorPublicIPs)+1)
		if err := a.activateCoordinator(context.TODO(), coordinatorPublicIP, sshUserKeys); err != nil {
			return err
		}
	}
	return nil
}

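// activateCoordinator adds a single fresh instance as an additional coordinator: it assigns the
// next free coordinator VPN IP, fetches the new peer's VPN public key, registers it, updates the
// other coordinators, and finally issues the ActivateAsAdditionalCoordinator RPC.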
func (a *API) activateCoordinator(ctx context.Context, coordinatorIP string, sshUserKeys []*pubproto.SSHUserKey) error {
	ctx, cancel := context.WithTimeout(ctx, deadlineDuration)
	defer cancel()

	if err := a.core.RequireState(state.ActivatingNodes); err != nil {
		return fmt.Errorf("coordinator is not in required state: %v", err)
	}
	assignedVPNIP, err := a.core.GetNextCoordinatorIP()
	if err != nil {
		return fmt.Errorf("requesting new coordinator vpn IP address: %v", err)
	}
	vpnIP, err := a.core.GetVPNIP()
	if err != nil {
		return fmt.Errorf("get own vpn IP address: %v", err)
	}
	thisPeer, err := a.assemblePeerStruct(vpnIP, role.Coordinator)
	if err != nil {
		return fmt.Errorf("assembling coordinator peer struct: %v", err)
	}
	ownerID, clusterID, err := a.core.GetIDs(nil)
	if err != nil {
		return fmt.Errorf("get owner and cluster ID: %v", err)
	}
	_, peers, err := a.core.GetPeers(0)
	if err != nil {
		return err
	}

	conn, err := a.dialer.Dial(ctx, net.JoinHostPort(coordinatorIP, endpointAVPNPort))
	if err != nil {
		return fmt.Errorf("dialing new coordinator: %v", err)
	}
	defer conn.Close()
	client := pubproto.NewAPIClient(conn)
	// This call could be omitted: the coordinator being activated knows its own public key
	// and could pass it as an argument during activation.
	// TODO: Remove this gRPC function when we have working integration.
	resp, err := client.GetPeerVPNPublicKey(ctx, &pubproto.GetPeerVPNPublicKeyRequest{})
	if err != nil {
		a.logger.Error("failed to get PubKey from new coordinator", zap.Error(err))
		return err
	}
	newCoordinatorPeer := peer.Peer{VPNIP: assignedVPNIP, PublicIP: coordinatorIP, VPNPubKey: resp.CoordinatorPubKey, Role: role.Coordinator}
	err = a.core.AddPeer(newCoordinatorPeer)
	if err != nil {
		a.logger.Error("failed to store new coordinator data", zap.Error(err))
		return err
	}
	for _, p := range peers {
		if p.Role == role.Coordinator && p.VPNIP != thisPeer.VPNIP {
			if err := a.triggerCoordinatorUpdate(context.TODO(), p.PublicIP); err != nil {
				a.logger.Error("failed to trigger coordinator update", zap.Error(err), zap.String("endpoint", p.PublicIP), zap.String("vpnip", p.VPNIP))
			}
		}
	}
	_, err = client.ActivateAsAdditionalCoordinator(ctx, &pubproto.ActivateAsAdditionalCoordinatorRequest{
		AssignedVpnIp:             assignedVPNIP,
		ActivatingCoordinatorData: peer.ToPubProto([]peer.Peer{thisPeer})[0],
		Peers:                     peer.ToPubProto(peers),
		OwnerId:                   ownerID,
		ClusterId:                 clusterID,
		SshUserKeys:               sshUserKeys,
	})
	return err
}

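// TriggerCoordinatorUpdate synchronizes this coordinator's peer state with the store if the
// resource version has changed since the last update.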
func (a *API) TriggerCoordinatorUpdate(ctx context.Context, in *pubproto.TriggerCoordinatorUpdateRequest) (*pubproto.TriggerCoordinatorUpdateResponse, error) {
	if err := a.core.RequireState(state.ActivatingNodes); err != nil {
		return nil, status.Errorf(codes.FailedPrecondition, "coordinator is not in required state for updating state: %v", err)
	}
	resourceVersion, peers, err := a.core.GetPeers(a.resourceVersion)
	if err != nil {
		return nil, status.Errorf(codes.Internal, "get peers from store: %v", err)
	}
	if resourceVersion == a.resourceVersion {
		a.logger.Info("resource version identical, no need to update")
		return &pubproto.TriggerCoordinatorUpdateResponse{}, nil
	}
	err = a.core.UpdatePeers(peers)
	if err != nil {
		return nil, status.Errorf(codes.Internal, "synchronizing peers with vpn state: %v", err)
	}
	a.resourceVersion = resourceVersion
	return &pubproto.TriggerCoordinatorUpdateResponse{}, nil
}

// GetPeerVPNPublicKey returns the VPN public key of the peer.
func (a *API) GetPeerVPNPublicKey(ctx context.Context, in *pubproto.GetPeerVPNPublicKeyRequest) (*pubproto.GetPeerVPNPublicKeyResponse, error) {
	key, err := a.core.GetVPNPubKey()
	if err != nil {
		return nil, status.Errorf(codes.Internal, "obtaining VPNPubKey: %v", err)
	}
	return &pubproto.GetPeerVPNPublicKeyResponse{CoordinatorPubKey: key}, nil
}

func (a *API) triggerCoordinatorUpdate(ctx context.Context, publicIP string) error {
	ctx, cancel := context.WithTimeout(ctx, deadlineDuration)
	defer cancel()

	// We don't verify the peer certificate here, since TriggerCoordinatorUpdate triggers a connection over VPN
	// The target of the rpc needs to already be part of the VPN to process the request, meaning it is trusted
	conn, err := a.dialer.DialNoVerify(ctx, net.JoinHostPort(publicIP, endpointAVPNPort))
	if err != nil {
		return err
	}
	defer conn.Close()

	client := pubproto.NewAPIClient(conn)
	_, err = client.TriggerCoordinatorUpdate(ctx, &pubproto.TriggerCoordinatorUpdateRequest{})

	return err
}

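// getk8SCoordinatorJoinArgs fetches the kubeadm join token and control-plane certificate key
// from an existing coordinator over the VPN API.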
func (a *API) getk8SCoordinatorJoinArgs(ctx context.Context, coordinatorIP, port string) (*kubeadm.BootstrapTokenDiscovery, string, error) {
	conn, err := a.dialer.DialInsecure(ctx, net.JoinHostPort(coordinatorIP, port))
	if err != nil {
		return nil, "", err
	}
	defer conn.Close()
	client := vpnproto.NewAPIClient(conn)
	// since the key has to be generated every time, this gRPC induces ~1s overhead.
	resp, err := client.GetK8SCertificateKey(ctx, &vpnproto.GetK8SCertificateKeyRequest{})
	if err != nil {
		return nil, "", err
	}
	joinArgs, err := client.GetK8SJoinArgs(ctx, &vpnproto.GetK8SJoinArgsRequest{})
	if err != nil {
		return nil, "", err
	}
	joinToken := &kubeadm.BootstrapTokenDiscovery{
		Token:             joinArgs.Token,
		APIServerEndpoint: joinArgs.ApiServerEndpoint,
		CACertHashes:      []string{joinArgs.DiscoveryTokenCaCertHash},
	}

	return joinToken, resp.CertificateKey, err
}
@ -1,319 +0,0 @@
package pubapi

import (
	"context"
	"errors"
	"net"
	"testing"

	"github.com/edgelesssys/constellation/coordinator/logging"
	"github.com/edgelesssys/constellation/coordinator/peer"
	"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
	"github.com/edgelesssys/constellation/coordinator/role"
	"github.com/edgelesssys/constellation/coordinator/state"
	"github.com/edgelesssys/constellation/internal/grpc/dialer"
	"github.com/edgelesssys/constellation/internal/grpc/testdialer"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"go.uber.org/zap/zaptest"
	kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
)

func TestActivateAsAdditionalCoordinator(t *testing.T) {
	coordinatorPubKey := []byte{6, 7, 8}
	testCoord1 := stubPeer{peer: peer.Peer{PublicIP: "192.0.2.11", VPNPubKey: []byte{1, 2, 3}, VPNIP: "10.118.0.1", Role: role.Coordinator}}
	stubVPN := stubVPNAPI{joinArgs: kubeadm.BootstrapTokenDiscovery{
		APIServerEndpoint: "endp",
		Token:             "token",
		CACertHashes:      []string{"dis"},
	}}

	someErr := errors.New("some error")
	testCases := map[string]struct {
		coordinators               stubPeer
		state                      state.State
		wantState                  state.State
		vpnapi                     stubVPNAPI
		wantErr                    bool
		switchToPersistentStoreErr error
		k8sJoinargsErr             error
		k8sCertKeyErr              error
	}{
		"basic": {
			coordinators: testCoord1,
			state:        state.AcceptingInit,
			wantState:    state.ActivatingNodes,
			vpnapi:       stubVPN,
		},
		"already activated": {
			state:     state.ActivatingNodes,
			wantErr:   true,
			wantState: state.ActivatingNodes,
			vpnapi:    stubVPN,
		},
		"SwitchToPersistentStore error": {
			coordinators:               testCoord1,
			state:                      state.AcceptingInit,
			switchToPersistentStoreErr: someErr,
			wantErr:                    true,
			wantState:                  state.Failed,
			vpnapi:                     stubVPN,
		},
		"GetK8SJoinArgs error": {
			coordinators:               testCoord1,
			state:                      state.AcceptingInit,
			switchToPersistentStoreErr: someErr,
			wantErr:                    true,
			wantState:                  state.Failed,
			vpnapi:                     stubVPN,
			k8sJoinargsErr:             someErr,
		},
		"GetK8SCertificateKeyErr error": {
			coordinators:               testCoord1,
			state:                      state.AcceptingInit,
			switchToPersistentStoreErr: someErr,
			wantErr:                    true,
			wantState:                  state.Failed,
			vpnapi:                     stubVPN,
			k8sCertKeyErr:              someErr,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)
			require := require.New(t)

			tc.vpnapi.getJoinArgsErr = tc.k8sJoinargsErr
			tc.vpnapi.getK8SCertKeyErr = tc.k8sCertKeyErr
			core := &fakeCore{
				state:                      tc.state,
				vpnPubKey:                  coordinatorPubKey,
				switchToPersistentStoreErr: tc.switchToPersistentStoreErr,
				kubeconfig:                 []byte("kubeconfig"),
				ownerID:                    []byte("ownerID"),
				clusterID:                  []byte("clusterID"),
			}
			netDialer := testdialer.NewBufconnDialer()
			dialer := dialer.New(nil, fakeValidator{}, netDialer)

			getPublicIPAddr := func() (string, error) {
				return "192.0.2.1", nil
			}

			api := New(zaptest.NewLogger(t), &logging.NopLogger{}, core, dialer, stubVPNAPIServer{}, getPublicIPAddr, nil)
			defer api.Close()

			// spawn vpnServer
			vpnapiServer := tc.vpnapi.newServer()
			go vpnapiServer.Serve(netDialer.GetListener(net.JoinHostPort(tc.coordinators.peer.VPNIP, vpnAPIPort)))
			defer vpnapiServer.GracefulStop()

			_, err := api.ActivateAsAdditionalCoordinator(context.Background(), &pubproto.ActivateAsAdditionalCoordinatorRequest{
				AssignedVpnIp:             "10.118.0.2",
				ActivatingCoordinatorData: peer.ToPubProto([]peer.Peer{tc.coordinators.peer})[0],
				OwnerId:                   core.ownerID,
				ClusterId:                 core.clusterID,
			})

			assert.Equal(tc.wantState, core.state)

			if tc.wantErr {
				assert.Error(err)
				return
			}
			require.NoError(err)
		})
	}
}

func TestTriggerCoordinatorUpdate(t *testing.T) {
	// someErr := errors.New("failed")
	peers := []peer.Peer{
		{PublicIP: "192.0.2.11:2000", VPNIP: "192.0.2.21", VPNPubKey: []byte{1, 2, 3}},
		{PublicIP: "192.0.2.12:2000", VPNIP: "192.0.2.22", VPNPubKey: []byte{2, 3, 4}},
	}

	testCases := map[string]struct {
		peers        []peer.Peer
		state        state.State
		getUpdateErr error
		wantErr      bool
|
||||
}{
|
||||
"basic": {
|
||||
peers: peers,
|
||||
state: state.ActivatingNodes,
|
||||
},
|
||||
"not activated": {
|
||||
peers: peers,
|
||||
state: state.AcceptingInit,
|
||||
wantErr: true,
|
||||
},
|
||||
"wrong peer kind": {
|
||||
peers: peers,
|
||||
state: state.IsNode,
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for name, tc := range testCases {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
require := require.New(t)
|
||||
|
||||
logger := zaptest.NewLogger(t)
|
||||
core := &fakeCore{
|
||||
state: tc.state,
|
||||
peers: tc.peers,
|
||||
}
|
||||
dialer := dialer.New(nil, fakeValidator{}, nil)
|
||||
|
||||
api := New(logger, &logging.NopLogger{}, core, dialer, nil, nil, nil)
|
||||
|
||||
_, err := api.TriggerCoordinatorUpdate(context.Background(), &pubproto.TriggerCoordinatorUpdateRequest{})
|
||||
if tc.wantErr {
|
||||
assert.Error(err)
|
||||
return
|
||||
}
|
||||
require.NoError(err)
|
||||
|
||||
// second update should be a noop
|
||||
_, err = api.TriggerCoordinatorUpdate(context.Background(), &pubproto.TriggerCoordinatorUpdateRequest{})
|
||||
require.NoError(err)
|
||||
|
||||
require.Len(core.updatedPeers, 1)
|
||||
assert.Equal(tc.peers, core.updatedPeers[0])
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestActivateAdditionalCoordinators(t *testing.T) {
|
||||
someErr := errors.New("failed")
|
||||
coordinatorPubKey := []byte{6, 7, 8}
|
||||
testCoord1 := stubPeer{peer: peer.Peer{PublicIP: "192.0.2.11", VPNPubKey: []byte{1, 2, 3}, VPNIP: "10.118.0.1", Role: role.Coordinator}}
|
||||
|
||||
testCases := map[string]struct {
|
||||
coordinators stubPeer
|
||||
state state.State
|
||||
activateErr error
|
||||
getPublicKeyErr error
|
||||
wantErr bool
|
||||
wantState state.State
|
||||
}{
|
||||
"basic": {
|
||||
coordinators: testCoord1,
|
||||
state: state.ActivatingNodes,
|
||||
wantState: state.ActivatingNodes,
|
||||
},
|
||||
"Activation Err": {
|
||||
coordinators: testCoord1,
|
||||
state: state.ActivatingNodes,
|
||||
wantState: state.ActivatingNodes,
|
||||
activateErr: someErr,
|
||||
wantErr: true,
|
||||
},
|
||||
"Not in exprected state": {
|
||||
coordinators: testCoord1,
|
||||
state: state.AcceptingInit,
|
||||
wantState: state.AcceptingInit,
|
||||
wantErr: true,
|
||||
},
|
||||
"getPeerPublicKey error": {
|
||||
coordinators: testCoord1,
|
||||
state: state.ActivatingNodes,
|
||||
wantState: state.ActivatingNodes,
|
||||
getPublicKeyErr: someErr,
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for name, tc := range testCases {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
require := require.New(t)
|
||||
|
||||
core := &fakeCore{
|
||||
state: tc.state,
|
||||
vpnPubKey: coordinatorPubKey,
|
||||
kubeconfig: []byte("kubeconfig"),
|
||||
ownerID: []byte("ownerID"),
|
||||
clusterID: []byte("clusterID"),
|
||||
}
|
||||
netDialer := testdialer.NewBufconnDialer()
|
||||
dialer := dialer.New(nil, fakeValidator{}, netDialer)
|
||||
|
||||
getPublicIPAddr := func() (string, error) {
|
||||
return "192.0.2.1", nil
|
||||
}
|
||||
|
||||
api := New(zaptest.NewLogger(t), &logging.NopLogger{}, core, dialer, stubVPNAPIServer{}, getPublicIPAddr, nil)
|
||||
defer api.Close()
|
||||
|
||||
// spawn coordinator
|
||||
tc.coordinators.activateErr = tc.activateErr
|
||||
tc.coordinators.getPubKeyErr = tc.getPublicKeyErr
|
||||
server := tc.coordinators.newServer()
|
||||
go server.Serve(netDialer.GetListener(net.JoinHostPort(tc.coordinators.peer.PublicIP, endpointAVPNPort)))
|
||||
defer server.GracefulStop()
|
||||
|
||||
_, err := api.ActivateAdditionalCoordinator(context.Background(), &pubproto.ActivateAdditionalCoordinatorRequest{CoordinatorPublicIp: tc.coordinators.peer.PublicIP})
|
||||
|
||||
assert.Equal(tc.wantState, core.state)
|
||||
|
||||
if tc.wantErr {
|
||||
assert.Error(err)
|
||||
return
|
||||
}
|
||||
require.NoError(err)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetPeerVPNPublicKey(t *testing.T) {
|
||||
someErr := errors.New("failed")
|
||||
testCoord := stubPeer{peer: peer.Peer{PublicIP: "192.0.2.11", VPNPubKey: []byte{1, 2, 3}, VPNIP: "10.118.0.1", Role: role.Coordinator}}
|
||||
|
||||
testCases := map[string]struct {
|
||||
coordinator stubPeer
|
||||
getVPNPubKeyErr error
|
||||
wantErr bool
|
||||
}{
|
||||
"basic": {
|
||||
coordinator: testCoord,
|
||||
},
|
||||
"Activation Err": {
|
||||
coordinator: testCoord,
|
||||
getVPNPubKeyErr: someErr,
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for name, tc := range testCases {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
require := require.New(t)
|
||||
|
||||
core := &fakeCore{
|
||||
vpnPubKey: tc.coordinator.peer.VPNPubKey,
|
||||
getvpnPubKeyErr: tc.getVPNPubKeyErr,
|
||||
}
|
||||
dialer := dialer.New(nil, fakeValidator{}, testdialer.NewBufconnDialer())
|
||||
|
||||
getPublicIPAddr := func() (string, error) {
|
||||
return "192.0.2.1", nil
|
||||
}
|
||||
|
||||
api := New(zaptest.NewLogger(t), &logging.NopLogger{}, core, dialer, stubVPNAPIServer{}, getPublicIPAddr, nil)
|
||||
defer api.Close()
|
||||
|
||||
resp, err := api.GetPeerVPNPublicKey(context.Background(), &pubproto.GetPeerVPNPublicKeyRequest{})
|
||||
|
||||
if tc.wantErr {
|
||||
assert.Error(err)
|
||||
return
|
||||
}
|
||||
require.NoError(err)
|
||||
assert.Equal(tc.coordinator.peer.VPNPubKey, resp.CoordinatorPubKey)
|
||||
})
|
||||
}
|
||||
}
|
@@ -1,272 +0,0 @@
package pubapi

import (
	"context"
	"net"
	"time"

	"github.com/edgelesssys/constellation/coordinator/peer"
	"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
	"github.com/edgelesssys/constellation/coordinator/role"
	"github.com/edgelesssys/constellation/coordinator/state"
	"github.com/edgelesssys/constellation/coordinator/vpnapi/vpnproto"
	"github.com/edgelesssys/constellation/internal/deploy/ssh"
	"go.uber.org/zap"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
)

/*
                   +-------------+          +-------+
                   | coordinator |          | node  |
                   +-------------+          +-------+
                          |                     |
                          | initial request     |
                          |-------------------->|
                          |                     | -------------------------------------------\
                          |                     |-| update state "NodeWaitingForClusterJoin" |
                          |                     | |------------------------------------------|
                          |                     | ------------\
                          |                     |-| setup VPN |
                          |                     | |-----------|
                          |                     | ---------------------\
                          |                     |-| persist node state |
                          |                     | |--------------------|
                          |                     |
                          |     state disk uuid |
                          |<--------------------|
------------------------\ |                     |
| derive state disk key |-|                     |
|-----------------------| |                     |
                          |                     |
                          | state disk key      |
                          |-------------------->|
                          |                     | -------------------------------\
                          |                     |-| update state disk passphrase |
                          |                     | |------------------------------|
                          |                     |
                          |      VPN public key |
                          |<--------------------|
                          |                     |
*/

// ActivateAsNode is the RPC call to activate a node.
func (a *API) ActivateAsNode(stream pubproto.API_ActivateAsNodeServer) (reterr error) {
	a.mut.Lock()
	defer a.mut.Unlock()

	if err := a.core.RequireState(state.AcceptingInit); err != nil {
		return status.Errorf(codes.FailedPrecondition, "node is not in required state for activation: %v", err)
	}

	/*
		coordinator -> initial request -> node
	*/
	message, err := stream.Recv()
	if err != nil {
		return status.Errorf(codes.Internal, "receiving initial request from coordinator: %v", err)
	}
	initialRequest, ok := message.GetRequest().(*pubproto.ActivateAsNodeRequest_InitialRequest)
	if !ok {
		return status.Error(codes.Internal, "expected initial request but got different message type")
	}
	in := initialRequest.InitialRequest
	if len(in.OwnerId) == 0 || len(in.ClusterId) == 0 {
		a.logger.Error("missing data to taint worker node as initialized")
		return status.Error(codes.InvalidArgument, "missing data to taint worker node as initialized")
	}

	// If any of the following actions fail, we cannot revert.
	// Thus, mark this peer as failed.
	defer func() {
		if reterr != nil {
			_ = a.core.AdvanceState(state.Failed, nil, nil)
		}
	}()

	// AdvanceState MUST be called before any other functions that are not sanity checks or otherwise required.
	// This ensures the node is marked as initialized before it is in a state that allows code execution.
	// Any new additions to ActivateAsNode MUST come after.
	if err := a.core.AdvanceState(state.NodeWaitingForClusterJoin, in.OwnerId, in.ClusterId); err != nil {
		return status.Errorf(codes.Internal, "advance node state: %v", err)
	}

	// Set up SSH users for the node, if defined.
	if len(in.SshUserKeys) != 0 {
		sshUserKeys := ssh.FromProtoSlice(in.SshUserKeys)
		if err := a.core.CreateSSHUsers(sshUserKeys); err != nil {
			return status.Errorf(codes.Internal, "creating SSH users on node: %v", err)
		}
	}

	vpnPubKey, err := a.core.GetVPNPubKey()
	if err != nil {
		return status.Errorf(codes.Internal, "get vpn publicKey: %v", err)
	}

	if err := a.core.SetVPNIP(in.NodeVpnIp); err != nil {
		return status.Errorf(codes.Internal, "setting node vpn IP address: %v", err)
	}

	// add initial peers
	if err := a.core.UpdatePeers(peer.FromPubProto(in.Peers)); err != nil {
		return status.Errorf(codes.Internal, "synchronizing peers with vpn state: %v", err)
	}

	// persist node state on disk
	if err := a.core.PersistNodeState(role.Node, in.NodeVpnIp, in.OwnerId, in.ClusterId); err != nil {
		return status.Errorf(codes.Internal, "persist node state: %v", err)
	}

	/*
		coordinator <- state disk uuid <- node
	*/
	diskUUID, err := a.core.GetDiskUUID()
	if err != nil {
		return status.Errorf(codes.Internal, "get disk uuid: %v", err)
	}
	if err := stream.Send(&pubproto.ActivateAsNodeResponse{
		Response: &pubproto.ActivateAsNodeResponse_StateDiskUuid{StateDiskUuid: diskUUID},
	}); err != nil {
		return status.Errorf(codes.Internal, "%v", err)
	}

	/*
		coordinator -> state disk key -> node
	*/
	message, err = stream.Recv()
	if err != nil {
		return status.Errorf(codes.Internal, "failed to receive state disk key from coordinator: %v", err)
	}
	diskKey, ok := message.GetRequest().(*pubproto.ActivateAsNodeRequest_StateDiskKey)
	if !ok {
		return status.Error(codes.Internal, "expected state disk key but got different message type")
	}
	if diskKey.StateDiskKey == nil {
		return status.Error(codes.Internal, "empty state disk key message from coordinator")
	}
	if err := a.core.UpdateDiskPassphrase(string(diskKey.StateDiskKey)); err != nil {
		return status.Errorf(codes.Internal, "%v", err)
	}

	// regularly get (peer) updates from the Coordinator
	a.StartUpdateLoop()

	/*
		coordinator <- VPN public key <- node
	*/
	if err := stream.Send(&pubproto.ActivateAsNodeResponse{
		Response: &pubproto.ActivateAsNodeResponse_NodeVpnPubKey{
			NodeVpnPubKey: vpnPubKey,
		},
	}); err != nil {
		return status.Errorf(codes.Internal, "%v", err)
	}

	return nil
}

// JoinCluster is the RPC call to request this node to join the cluster.
func (a *API) JoinCluster(ctx context.Context, in *pubproto.JoinClusterRequest) (*pubproto.JoinClusterResponse, error) {
	a.mut.Lock()
	defer a.mut.Unlock()

	if err := a.core.RequireState(state.NodeWaitingForClusterJoin); err != nil {
		return nil, status.Errorf(codes.FailedPrecondition, "node is not in required state for cluster join: %v", err)
	}

	conn, err := a.dialer.DialInsecure(ctx, net.JoinHostPort(in.CoordinatorVpnIp, vpnAPIPort))
	if err != nil {
		return nil, status.Errorf(codes.Unavailable, "dial coordinator: %v", err)
	}
	resp, err := vpnproto.NewAPIClient(conn).GetK8SJoinArgs(ctx, &vpnproto.GetK8SJoinArgsRequest{})
	conn.Close()
	if err != nil {
		return nil, status.Errorf(codes.Internal, "request K8s join string: %v", err)
	}

	err = a.core.JoinCluster(context.TODO(), &kubeadm.BootstrapTokenDiscovery{
		APIServerEndpoint: resp.ApiServerEndpoint,
		Token:             resp.Token,
		CACertHashes:      []string{resp.DiscoveryTokenCaCertHash},
	}, "", role.Node)
	if err != nil {
		_ = a.core.AdvanceState(state.Failed, nil, nil)
		return nil, status.Errorf(codes.Internal, "joining Kubernetes cluster: %v", err)
	}

	if err := a.core.AdvanceState(state.IsNode, nil, nil); err != nil {
		return nil, status.Errorf(codes.Internal, "advance state to IsNode: %v", err)
	}

	return &pubproto.JoinClusterResponse{}, nil
}

// TriggerNodeUpdate is the RPC call to request this node to get an update from the Coordinator.
func (a *API) TriggerNodeUpdate(ctx context.Context, in *pubproto.TriggerNodeUpdateRequest) (*pubproto.TriggerNodeUpdateResponse, error) {
	if err := a.core.RequireState(state.IsNode); err != nil {
		return nil, status.Errorf(codes.FailedPrecondition, "node is not in required state for receiving update command: %v", err)
	}
	if err := a.update(ctx); err != nil {
		return nil, status.Errorf(codes.Internal, "node update: %v", err)
	}
	return &pubproto.TriggerNodeUpdateResponse{}, nil
}

// StartUpdateLoop starts a loop that will periodically request updates from the Coordinator.
func (a *API) StartUpdateLoop() {
	a.wgClose.Add(1)
	go a.updateLoop()
}

func (a *API) updateLoop() {
	defer a.wgClose.Done()
	ticker := time.NewTicker(updateInterval)

	for {
		if err := a.update(context.Background()); err != nil {
			a.logger.Error("updateLoop: update failed", zap.Error(err))
		}
		select {
		case <-a.stopUpdate:
			ticker.Stop()
			return
		case <-ticker.C:
		}
	}
}

func (a *API) update(ctx context.Context) error {
	a.mut.Lock()
	defer a.mut.Unlock()

	ctx, cancel := context.WithTimeout(ctx, deadlineDuration)
	defer cancel()

	// TODO: replace hardcoded IP
	conn, err := a.dialer.DialInsecure(ctx, net.JoinHostPort("10.118.0.1", vpnAPIPort))
	if err != nil {
		return err
	}
	resp, err := vpnproto.NewAPIClient(conn).GetUpdate(ctx, &vpnproto.GetUpdateRequest{ResourceVersion: int64(a.resourceVersion)})
	conn.Close()
	if err != nil {
		return err
	}

	resourceVersion := int(resp.ResourceVersion)
	if resourceVersion == a.resourceVersion {
		return nil
	}

	// TODO: does this naive approach of performing a full update every time need to be replaced by something more clever, like watches in K8s?
	// https://kubernetes.io/docs/reference/using-api/api-concepts/#efficient-detection-of-changes

	if err := a.core.UpdatePeers(peer.FromVPNProto(resp.Peers)); err != nil {
		return err
	}
	a.resourceVersion = resourceVersion

	return nil
}
@@ -1,478 +0,0 @@
package pubapi

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net"
	"testing"

	"github.com/edgelesssys/constellation/coordinator/logging"
	"github.com/edgelesssys/constellation/coordinator/peer"
	"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
	"github.com/edgelesssys/constellation/coordinator/role"
	"github.com/edgelesssys/constellation/coordinator/state"
	"github.com/edgelesssys/constellation/coordinator/vpnapi/vpnproto"
	"github.com/edgelesssys/constellation/internal/atls"
	"github.com/edgelesssys/constellation/internal/deploy/ssh"
	"github.com/edgelesssys/constellation/internal/deploy/user"
	"github.com/edgelesssys/constellation/internal/grpc/atlscredentials"
	"github.com/edgelesssys/constellation/internal/grpc/dialer"
	"github.com/edgelesssys/constellation/internal/grpc/testdialer"
	"github.com/edgelesssys/constellation/internal/oid"
	"github.com/spf13/afero"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"go.uber.org/zap/zaptest"
	"google.golang.org/grpc"
	kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
)

func TestActivateAsNode(t *testing.T) {
	someErr := errors.New("failed")
	peer1 := peer.Peer{PublicIP: "192.0.2.11:2000", VPNIP: "192.0.2.21", VPNPubKey: []byte{1, 2, 3}}
	peer2 := peer.Peer{PublicIP: "192.0.2.12:2000", VPNIP: "192.0.2.22", VPNPubKey: []byte{2, 3, 4}}
	sshUser1 := &ssh.UserKey{
		Username:  "test-user-1",
		PublicKey: "ssh-rsa abcdefg",
	}
	sshUser2 := &ssh.UserKey{
		Username:  "test-user-2",
		PublicKey: "ssh-ed25519 hijklmn",
	}

	testCases := map[string]struct {
		initialPeers            []peer.Peer
		updatedPeers            []peer.Peer
		state                   state.State
		getUpdateErr            error
		setVPNIPErr             error
		messageSequenceOverride []string
		wantErr                 bool
		wantState               state.State
		sshKeys                 []*ssh.UserKey
	}{
		"basic": {
			initialPeers: []peer.Peer{peer1},
			updatedPeers: []peer.Peer{peer2},
			state:        state.AcceptingInit,
			wantState:    state.NodeWaitingForClusterJoin,
		},
		"basic with SSH users": {
			initialPeers: []peer.Peer{peer1},
			updatedPeers: []peer.Peer{peer2},
			state:        state.AcceptingInit,
			wantState:    state.NodeWaitingForClusterJoin,
			sshKeys:      []*ssh.UserKey{sshUser1, sshUser2},
		},
		"already activated": {
			initialPeers: []peer.Peer{peer1},
			updatedPeers: []peer.Peer{peer2},
			state:        state.IsNode,
			wantErr:      true,
			wantState:    state.IsNode,
		},
		"wrong peer kind": {
			initialPeers: []peer.Peer{peer1},
			updatedPeers: []peer.Peer{peer2},
			state:        state.ActivatingNodes,
			wantErr:      true,
			wantState:    state.ActivatingNodes,
		},
		"GetUpdate error": {
			initialPeers: []peer.Peer{peer1},
			updatedPeers: []peer.Peer{peer2},
			state:        state.AcceptingInit,
			getUpdateErr: someErr,
			wantState:    state.NodeWaitingForClusterJoin,
		},
		"SetVPNIP error": {
			initialPeers: []peer.Peer{peer1},
			updatedPeers: []peer.Peer{peer2},
			state:        state.AcceptingInit,
			setVPNIPErr:  someErr,
			wantErr:      true,
			wantState:    state.Failed,
		},
		"no messages sent to node": {
			initialPeers:            []peer.Peer{peer1},
			updatedPeers:            []peer.Peer{peer2},
			state:                   state.AcceptingInit,
			messageSequenceOverride: []string{},
			wantErr:                 true,
			wantState:               state.AcceptingInit,
		},
		"only initial message sent to node": {
			initialPeers:            []peer.Peer{peer1},
			updatedPeers:            []peer.Peer{peer2},
			state:                   state.AcceptingInit,
			messageSequenceOverride: []string{"initialRequest"},
			wantErr:                 true,
			wantState:               state.Failed,
		},
		"wrong initial message sent to node": {
			initialPeers:            []peer.Peer{peer1},
			updatedPeers:            []peer.Peer{peer2},
			state:                   state.AcceptingInit,
			messageSequenceOverride: []string{"stateDiskKey"},
			wantErr:                 true,
			wantState:               state.AcceptingInit,
		},
		"initial message sent twice to node": {
			initialPeers:            []peer.Peer{peer1},
			updatedPeers:            []peer.Peer{peer2},
			state:                   state.AcceptingInit,
			messageSequenceOverride: []string{"initialRequest", "initialRequest"},
			wantErr:                 true,
			wantState:               state.Failed,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)
			require := require.New(t)

			const (
				nodeIP    = "192.0.2.2"
				nodeVPNIP = "10.118.0.2"
			)
			vpnPubKey := []byte{7, 8, 9}
			ownerID := []byte("ownerID")
			clusterID := []byte("clusterID")
			stateDiskKey := []byte("stateDiskKey")
			messageSequence := []string{"initialRequest", "stateDiskKey"}
			if tc.messageSequenceOverride != nil {
				messageSequence = tc.messageSequenceOverride
			}

			logger := zaptest.NewLogger(t)
			fs := afero.NewMemMapFs()
			linuxUserManager := user.NewLinuxUserManagerFake(fs)
			cor := &fakeCore{state: tc.state, vpnPubKey: vpnPubKey, setVPNIPErr: tc.setVPNIPErr, linuxUserManager: linuxUserManager}
			netDialer := testdialer.NewBufconnDialer()
			dialer := dialer.New(nil, fakeValidator{}, netDialer)

			api := New(logger, &logging.NopLogger{}, cor, dialer, nil, nil, nil)
			defer api.Close()

			vserver := grpc.NewServer()
			vapi := &stubVPNAPI{peers: tc.updatedPeers, getUpdateErr: tc.getUpdateErr}
			vpnproto.RegisterAPIServer(vserver, vapi)
			go vserver.Serve(netDialer.GetListener(net.JoinHostPort("10.118.0.1", vpnAPIPort)))
			defer vserver.GracefulStop()

			creds := atlscredentials.New(atls.NewFakeIssuer(oid.Dummy{}), nil)
			pubserver := grpc.NewServer(grpc.Creds(creds))
			pubproto.RegisterAPIServer(pubserver, api)
			go pubserver.Serve(netDialer.GetListener(net.JoinHostPort(nodeIP, endpointAVPNPort)))
			defer pubserver.GracefulStop()

			_, nodeVPNPubKey, err := activateNode(require, netDialer, messageSequence, nodeIP, "9000", nodeVPNIP, peer.ToPubProto(tc.initialPeers), ownerID, clusterID, stateDiskKey, ssh.ToProtoSlice(tc.sshKeys))
			assert.Equal(tc.wantState, cor.state)

			if tc.wantErr {
				assert.Error(err)
				return
			}
			require.NoError(err)

			assert.Equal(vpnPubKey, nodeVPNPubKey)
			assert.Equal(nodeVPNIP, cor.vpnIP)
			assert.Equal(ownerID, cor.ownerID)
			assert.Equal(clusterID, cor.clusterID)

			api.Close() // blocks until the update loop has finished

			if tc.getUpdateErr == nil {
				require.Len(cor.updatedPeers, 2)
				assert.Equal(tc.updatedPeers, cor.updatedPeers[1])
			} else {
				require.Len(cor.updatedPeers, 1)
			}
			assert.Equal(tc.initialPeers, cor.updatedPeers[0])
			assert.Equal([]role.Role{role.Node}, cor.persistNodeStateRoles)

			// Test SSH user & key creation. Both cases: "supposed to add" and "not supposed to add".
			// This slightly differs from a real environment (e.g. missing /var/home) but should be fine in the stub context with a virtual file system.
			if tc.sshKeys != nil {
				passwd := user.Passwd{}
				entries, err := passwd.Parse(fs)
				require.NoError(err)
				for _, singleEntry := range entries {
					username := singleEntry.Gecos
					_, err := fs.Stat(fmt.Sprintf("/var/home/%s/.ssh/authorized_keys.d/constellation-ssh-keys", username))
					assert.NoError(err)
				}
			} else {
				passwd := user.Passwd{}
				_, err := passwd.Parse(fs)
				assert.EqualError(err, "open /etc/passwd: file does not exist")
				_, err = fs.Stat("/var/home")
				assert.EqualError(err, "open /var/home: file does not exist")
			}
		})
	}
}

func TestTriggerNodeUpdate(t *testing.T) {
	someErr := errors.New("failed")
	peers := []peer.Peer{
		{PublicIP: "192.0.2.11:2000", VPNIP: "192.0.2.21", VPNPubKey: []byte{1, 2, 3}},
		{PublicIP: "192.0.2.12:2000", VPNIP: "192.0.2.22", VPNPubKey: []byte{2, 3, 4}},
	}

	testCases := map[string]struct {
		peers        []peer.Peer
		state        state.State
		getUpdateErr error
		wantErr      bool
	}{
		"basic": {
			peers: peers,
			state: state.IsNode,
		},
		"not activated": {
			peers:   peers,
			state:   state.AcceptingInit,
			wantErr: true,
		},
		"wrong peer kind": {
			peers:   peers,
			state:   state.ActivatingNodes,
			wantErr: true,
		},
		"GetUpdate error": {
			peers:        peers,
			state:        state.IsNode,
			getUpdateErr: someErr,
			wantErr:      true,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)
			require := require.New(t)

			logger := zaptest.NewLogger(t)
			core := &fakeCore{state: tc.state}
			netDialer := testdialer.NewBufconnDialer()
			dialer := dialer.New(nil, fakeValidator{}, netDialer)

			api := New(logger, &logging.NopLogger{}, core, dialer, nil, nil, nil)

			vserver := grpc.NewServer()
			vapi := &stubVPNAPI{
				peers:        tc.peers,
				getUpdateErr: tc.getUpdateErr,
			}
			vpnproto.RegisterAPIServer(vserver, vapi)
			go vserver.Serve(netDialer.GetListener(net.JoinHostPort("10.118.0.1", vpnAPIPort)))
			defer vserver.GracefulStop()

			_, err := api.TriggerNodeUpdate(context.Background(), &pubproto.TriggerNodeUpdateRequest{})
			if tc.wantErr {
				assert.Error(err)
				return
			}
			require.NoError(err)

			// second update should be a noop
			_, err = api.TriggerNodeUpdate(context.Background(), &pubproto.TriggerNodeUpdateRequest{})
			require.NoError(err)

			require.Len(core.updatedPeers, 1)
			assert.Equal(tc.peers, core.updatedPeers[0])
		})
	}
}

func TestJoinCluster(t *testing.T) {
	someErr := errors.New("failed")

	testCases := map[string]struct {
		state          state.State
		getJoinArgsErr error
		joinClusterErr error
		wantErr        bool
		wantState      state.State
	}{
		"basic": {
			state:     state.NodeWaitingForClusterJoin,
			wantState: state.IsNode,
		},
		"not activated": {
			state:     state.AcceptingInit,
			wantErr:   true,
			wantState: state.AcceptingInit,
		},
		"wrong peer kind": {
			state:     state.ActivatingNodes,
			wantErr:   true,
			wantState: state.ActivatingNodes,
		},
		"GetK8sJoinArgs error": {
			state:          state.NodeWaitingForClusterJoin,
			getJoinArgsErr: someErr,
			wantErr:        true,
			wantState:      state.NodeWaitingForClusterJoin,
		},
		"JoinCluster error": {
			state:          state.NodeWaitingForClusterJoin,
			joinClusterErr: someErr,
			wantErr:        true,
			wantState:      state.Failed,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)
			require := require.New(t)

			logger := zaptest.NewLogger(t)
			core := &fakeCore{state: tc.state, joinClusterErr: tc.joinClusterErr}
			netDialer := testdialer.NewBufconnDialer()
			dialer := dialer.New(nil, fakeValidator{}, netDialer)

			api := New(logger, &logging.NopLogger{}, core, dialer, nil, nil, nil)

			vserver := grpc.NewServer()
			vapi := &stubVPNAPI{
				joinArgs: kubeadm.BootstrapTokenDiscovery{
					APIServerEndpoint: "endp",
					Token:             "token",
					CACertHashes:      []string{"dis"},
				},
				getJoinArgsErr: tc.getJoinArgsErr,
			}
			vpnproto.RegisterAPIServer(vserver, vapi)
			go vserver.Serve(netDialer.GetListener(net.JoinHostPort("192.0.2.1", vpnAPIPort)))
			defer vserver.GracefulStop()

			_, err := api.JoinCluster(context.Background(), &pubproto.JoinClusterRequest{CoordinatorVpnIp: "192.0.2.1"})

			assert.Equal(tc.wantState, core.state)

			if tc.wantErr {
				assert.Error(err)
				return
			}
			require.NoError(err)

			assert.Equal([]kubeadm.BootstrapTokenDiscovery{vapi.joinArgs}, core.joinArgs)
		})
	}
}

func activateNode(require *require.Assertions, dialer netDialer, messageSequence []string, nodeIP, bindPort, nodeVPNIP string, peers []*pubproto.Peer, ownerID, clusterID, stateDiskKey []byte, sshUserKeys []*pubproto.SSHUserKey) (string, []byte, error) {
	ctx := context.Background()
	conn, err := dialGRPC(ctx, dialer, net.JoinHostPort(nodeIP, bindPort))
	require.NoError(err)
	defer conn.Close()

	client := pubproto.NewAPIClient(conn)
	stream, err := client.ActivateAsNode(ctx)
	if err != nil {
		return "", nil, err
	}

	for _, message := range messageSequence {
		switch message {
		case "initialRequest":
			err = stream.Send(&pubproto.ActivateAsNodeRequest{
				Request: &pubproto.ActivateAsNodeRequest_InitialRequest{
					InitialRequest: &pubproto.ActivateAsNodeInitialRequest{
						NodeVpnIp:   nodeVPNIP,
						Peers:       peers,
						OwnerId:     ownerID,
						ClusterId:   clusterID,
						SshUserKeys: sshUserKeys,
					},
				},
			})
			if err != nil {
				return "", nil, err
			}
		case "stateDiskKey":
			err = stream.Send(&pubproto.ActivateAsNodeRequest{
				Request: &pubproto.ActivateAsNodeRequest_StateDiskKey{
					StateDiskKey: stateDiskKey,
				},
			})
			if err != nil {
				return "", nil, err
			}
		default:
			panic("unknown message in activation")
		}
	}
	require.NoError(stream.CloseSend())

	diskUUIDReq, err := stream.Recv()
	if err != nil {
		return "", nil, err
	}
	diskUUID := diskUUIDReq.GetStateDiskUuid()

	vpnPubKeyReq, err := stream.Recv()
	if err != nil {
		return "", nil, err
	}
	nodeVPNPubKey := vpnPubKeyReq.GetNodeVpnPubKey()

	_, err = stream.Recv()
	if err != io.EOF {
		return "", nil, err
	}

	return diskUUID, nodeVPNPubKey, nil
}

func dialGRPC(ctx context.Context, dialer netDialer, target string) (*grpc.ClientConn, error) {
	creds := atlscredentials.New(nil, atls.NewFakeValidators(oid.Dummy{}))

	return grpc.DialContext(ctx, target,
		grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
			return dialer.DialContext(ctx, "tcp", addr)
		}),
		grpc.WithTransportCredentials(creds),
	)
}

type stubVPNAPI struct {
	peers            []peer.Peer
	joinArgs         kubeadm.BootstrapTokenDiscovery
	getUpdateErr     error
	getJoinArgsErr   error
	getK8SCertKeyErr error
	vpnproto.UnimplementedAPIServer
}

func (a *stubVPNAPI) GetUpdate(ctx context.Context, in *vpnproto.GetUpdateRequest) (*vpnproto.GetUpdateResponse, error) {
	return &vpnproto.GetUpdateResponse{ResourceVersion: 1, Peers: peer.ToVPNProto(a.peers)}, a.getUpdateErr
}

func (a *stubVPNAPI) GetK8SJoinArgs(ctx context.Context, in *vpnproto.GetK8SJoinArgsRequest) (*vpnproto.GetK8SJoinArgsResponse, error) {
	return &vpnproto.GetK8SJoinArgsResponse{
		ApiServerEndpoint:        a.joinArgs.APIServerEndpoint,
		Token:                    a.joinArgs.Token,
		DiscoveryTokenCaCertHash: a.joinArgs.CACertHashes[0],
	}, a.getJoinArgsErr
}

func (a *stubVPNAPI) GetK8SCertificateKey(ctx context.Context, in *vpnproto.GetK8SCertificateKeyRequest) (*vpnproto.GetK8SCertificateKeyResponse, error) {
	return &vpnproto.GetK8SCertificateKeyResponse{CertificateKey: "dummyCertKey"}, a.getK8SCertKeyErr
}

func (a *stubVPNAPI) newServer() *grpc.Server {
	server := grpc.NewServer()
	vpnproto.RegisterAPIServer(server, a)
	return server
}

type netDialer interface {
	DialContext(ctx context.Context, network, address string) (net.Conn, error)
}
@@ -1,117 +0,0 @@
// Package pubapi implements the API that a peer exposes publicly.
package pubapi

import (
	"context"
	"errors"
	"fmt"
	"net"
	"sync"
	"time"

	"github.com/edgelesssys/constellation/coordinator/logging"
	"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
	"github.com/edgelesssys/constellation/state/setup"
	"go.uber.org/zap"
	"google.golang.org/grpc"
	"google.golang.org/grpc/peer"
)

const (
	deadlineDuration = 5 * time.Minute
	endpointAVPNPort = "9000"
	vpnAPIPort       = "9027"
	updateInterval   = 10 * time.Second
)

// API implements the public API that a peer exposes.
type API struct {
	mut             sync.Mutex
	logger          *zap.Logger
	cloudLogger     logging.CloudLogger
	core            Core
	dialer          Dialer
	vpnAPIServer    VPNAPIServer
	getPublicIPAddr GetIPAddrFunc
	stopUpdate      chan struct{}
	wgClose         sync.WaitGroup
	resourceVersion int
	peerFromContext PeerFromContextFunc
	pubproto.UnimplementedAPIServer
}

// New creates a new API.
func New(logger *zap.Logger, cloudLogger logging.CloudLogger, core Core, dialer Dialer, vpnAPIServer VPNAPIServer, getPublicIPAddr GetIPAddrFunc, peerFromContext PeerFromContextFunc) *API {
	return &API{
		logger:          logger,
		cloudLogger:     cloudLogger,
		core:            core,
		dialer:          dialer,
		vpnAPIServer:    vpnAPIServer,
		getPublicIPAddr: getPublicIPAddr,
		stopUpdate:      make(chan struct{}, 1),
		peerFromContext: peerFromContext,
	}
}

// GetState is the RPC call to get the peer's state.
func (a *API) GetState(ctx context.Context, in *pubproto.GetStateRequest) (*pubproto.GetStateResponse, error) {
	return &pubproto.GetStateResponse{State: uint32(a.core.GetState())}, nil
}

// StartVPNAPIServer starts the VPN-API server.
func (a *API) StartVPNAPIServer(vpnIP string) error {
	if err := a.vpnAPIServer.Listen(net.JoinHostPort(vpnIP, vpnAPIPort)); err != nil {
		return fmt.Errorf("start vpnAPIServer: %v", err)
	}
	a.wgClose.Add(1)
	go func() {
		defer a.wgClose.Done()
		if err := a.vpnAPIServer.Serve(); err != nil {
			panic(err)
		}
	}()
	return nil
}

// Close closes the API.
func (a *API) Close() {
	a.stopUpdate <- struct{}{}
	if a.vpnAPIServer != nil {
		a.vpnAPIServer.Close()
	}
	a.wgClose.Wait()
}

// VPNAPIServer is the interface of the server exposing the VPN API.
type VPNAPIServer interface {
	Listen(endpoint string) error
	Serve() error
	Close()
}

// GetIPAddrFunc returns an IP address.
type GetIPAddrFunc func() (string, error)

// PeerFromContextFunc returns a peer endpoint (IP:port) from a given context.
type PeerFromContextFunc func(context.Context) (string, error)

// GetRecoveryPeerFromContext returns the caller's IP from the context, joined with the recovery port.
func GetRecoveryPeerFromContext(ctx context.Context) (string, error) {
	peer, ok := peer.FromContext(ctx)
	if !ok {
		return "", errors.New("unable to get peer from context")
	}

	peerIP, _, err := net.SplitHostPort(peer.Addr.String())
	if err != nil {
		return "", err
	}

	return net.JoinHostPort(peerIP, setup.RecoveryPort), nil
}

// Dialer can open gRPC client connections with different levels of aTLS encryption / verification.
type Dialer interface {
	Dial(ctx context.Context, target string) (*grpc.ClientConn, error)
	DialInsecure(ctx context.Context, target string) (*grpc.ClientConn, error)
	DialNoVerify(ctx context.Context, target string) (*grpc.ClientConn, error)
}
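The three dial levels above map to trust contexts: Dial performs aTLS verification, while DialInsecure and DialNoVerify are used for targets that are already trusted by virtue of being VPN members (see JoinCluster and triggerCoordinatorUpdate above). A minimal, hypothetical sketch of that choice; dialForTrustLevel and its overVPN flag are illustrative and not part of the deleted code:

// Sketch only: how a caller might pick a dial level.
func dialForTrustLevel(ctx context.Context, d Dialer, target string, overVPN bool) (*grpc.ClientConn, error) {
	if overVPN {
		// peers reachable over the VPN are already trusted members,
		// so no aTLS verification is needed
		return d.DialInsecure(ctx, target)
	}
	// outside the VPN, verify the peer via aTLS attestation
	return d.Dial(ctx, target)
}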
@@ -1,35 +0,0 @@
package pubapi

import (
	"context"
	"net"
	"testing"

	"github.com/stretchr/testify/assert"
	"go.uber.org/goleak"
	grpcpeer "google.golang.org/grpc/peer"
)

func TestMain(m *testing.M) {
	goleak.VerifyTestMain(m,
		// https://github.com/census-instrumentation/opencensus-go/issues/1262
		goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"),
	)
}

func TestGetRecoveryPeerFromContext(t *testing.T) {
	assert := assert.New(t)
	testIP := "192.0.2.1"
	testPort := 1234
	wantPeer := net.JoinHostPort(testIP, "9000")

	addr := &net.TCPAddr{IP: net.ParseIP(testIP), Port: testPort}
	ctx := grpcpeer.NewContext(context.Background(), &grpcpeer.Peer{Addr: addr})

	peer, err := GetRecoveryPeerFromContext(ctx)
	assert.NoError(err)
	assert.Equal(wantPeer, peer)

	_, err = GetRecoveryPeerFromContext(context.Background())
	assert.Error(err)
}
File diff suppressed because it is too large
@@ -1,164 +0,0 @@
syntax = "proto3";

package pubapi;

option go_package = "github.com/edgelesssys/constellation/coordinator/pubapi/pubproto";

service API {
  rpc GetState(GetStateRequest) returns (GetStateResponse);
  rpc ActivateAsCoordinator(ActivateAsCoordinatorRequest) returns (stream ActivateAsCoordinatorResponse);
  rpc ActivateAsNode(stream ActivateAsNodeRequest) returns (stream ActivateAsNodeResponse);
  rpc ActivateAdditionalNodes(ActivateAdditionalNodesRequest) returns (stream ActivateAdditionalNodesResponse);
  rpc ActivateAsAdditionalCoordinator(ActivateAsAdditionalCoordinatorRequest) returns (ActivateAsAdditionalCoordinatorResponse);
  rpc ActivateAdditionalCoordinator(ActivateAdditionalCoordinatorRequest) returns (ActivateAdditionalCoordinatorResponse);
  rpc JoinCluster(JoinClusterRequest) returns (JoinClusterResponse);
  rpc TriggerNodeUpdate(TriggerNodeUpdateRequest) returns (TriggerNodeUpdateResponse);
  rpc TriggerCoordinatorUpdate(TriggerCoordinatorUpdateRequest) returns (TriggerCoordinatorUpdateResponse);
  rpc GetPeerVPNPublicKey(GetPeerVPNPublicKeyRequest) returns (GetPeerVPNPublicKeyResponse);
  rpc GetVPNPeers(GetVPNPeersRequest) returns (GetVPNPeersResponse);
  rpc RequestStateDiskKey(RequestStateDiskKeyRequest) returns (RequestStateDiskKeyResponse);
}

message GetStateRequest {}

message GetStateResponse {
  uint32 state = 1;
}

message ActivateAsCoordinatorRequest {
  bytes admin_vpn_pub_key = 1;
  repeated string node_public_ips = 2;
  repeated string coordinator_public_ips = 3;
  repeated string autoscaling_node_groups = 4;
  bytes master_secret = 5;
  string kms_uri = 6;
  string storage_uri = 7;
  string key_encryption_key_id = 8;
  bool use_existing_kek = 9;
  string cloud_service_account_uri = 10;
  repeated SSHUserKey ssh_user_keys = 11;
}

message ActivateAsCoordinatorResponse {
  oneof content {
    AdminConfig admin_config = 1;
    Log log = 2;
  }
}

message ActivateAsNodeRequest {
  oneof request {
    ActivateAsNodeInitialRequest initial_request = 1;
    bytes state_disk_key = 2;
  }
}

message ActivateAsNodeInitialRequest {
  string node_vpn_ip = 1;
  repeated Peer peers = 2;
  bytes owner_id = 3;
  bytes cluster_id = 4;
  repeated SSHUserKey ssh_user_keys = 5;
}

message ActivateAsNodeResponse {
  oneof response {
    bytes node_vpn_pub_key = 1;
    string state_disk_uuid = 2;
  }
}

message ActivateAdditionalNodesRequest {
  repeated string node_public_ips = 1;
  repeated SSHUserKey ssh_user_keys = 2;
}

message ActivateAdditionalNodesResponse {
  Log log = 1;
}

message ActivateAsAdditionalCoordinatorRequest {
  string assigned_vpn_ip = 1;
  Peer activating_coordinator_data = 2;
  repeated Peer peers = 3;
  bytes owner_id = 4;
  bytes cluster_id = 5;
  repeated SSHUserKey ssh_user_keys = 6;
}

message ActivateAsAdditionalCoordinatorResponse {}

message ActivateAdditionalCoordinatorRequest {
  string coordinator_public_ip = 1;
  repeated SSHUserKey ssh_user_keys = 2;
}

message ActivateAdditionalCoordinatorResponse {}

message JoinClusterRequest {
  string coordinator_vpn_ip = 1;
}

message JoinClusterResponse {}

message TriggerNodeUpdateRequest {}

message TriggerNodeUpdateResponse {}

message TriggerCoordinatorUpdateRequest {}

message TriggerCoordinatorUpdateResponse {}

message RequestStateDiskKeyRequest {
  string disk_uuid = 1;
}

message RequestStateDiskKeyResponse {}

message GetPeerVPNPublicKeyRequest {}

message GetPeerVPNPublicKeyResponse {
  bytes coordinator_pub_key = 1;
}

message GetVPNPeersRequest {}

message GetVPNPeersResponse {
  repeated Peer peers = 1;
}

message AdminConfig {
  string admin_vpn_ip = 1;
  bytes coordinator_vpn_pub_key = 2;
  bytes kubeconfig = 3;
  bytes owner_id = 4;
  bytes cluster_id = 5;
}

message Log {
  string message = 1;
}

message Peer {
  string public_ip = 1;
  string vpn_ip = 2;
  bytes vpn_pub_key = 3;
  uint32 role = 4;
}

message SSHUserKey {
  string username = 1;
  string public_key = 2;
}
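For orientation, a minimal sketch of how the generated Go stubs for this service were typically consumed. The endpoint is a placeholder and the insecure transport credentials are for illustration only; real callers dialed with aTLS credentials:

package main

import (
	"context"
	"log"

	"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// placeholder endpoint on the coordinator's public API port
	conn, err := grpc.Dial("192.0.2.1:9000", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := pubproto.NewAPIClient(conn)
	resp, err := client.GetPeerVPNPublicKey(context.Background(), &pubproto.GetPeerVPNPublicKeyRequest{})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("coordinator VPN public key: %x", resp.CoordinatorPubKey)
}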
@@ -1,588 +0,0 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.2.0
// - protoc             v3.20.1
// source: pubapi.proto

package pubproto

import (
	context "context"
	grpc "google.golang.org/grpc"
	codes "google.golang.org/grpc/codes"
	status "google.golang.org/grpc/status"
)

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7

// APIClient is the client API for API service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type APIClient interface {
	GetState(ctx context.Context, in *GetStateRequest, opts ...grpc.CallOption) (*GetStateResponse, error)
	ActivateAsCoordinator(ctx context.Context, in *ActivateAsCoordinatorRequest, opts ...grpc.CallOption) (API_ActivateAsCoordinatorClient, error)
	ActivateAsNode(ctx context.Context, opts ...grpc.CallOption) (API_ActivateAsNodeClient, error)
	ActivateAdditionalNodes(ctx context.Context, in *ActivateAdditionalNodesRequest, opts ...grpc.CallOption) (API_ActivateAdditionalNodesClient, error)
	ActivateAsAdditionalCoordinator(ctx context.Context, in *ActivateAsAdditionalCoordinatorRequest, opts ...grpc.CallOption) (*ActivateAsAdditionalCoordinatorResponse, error)
	ActivateAdditionalCoordinator(ctx context.Context, in *ActivateAdditionalCoordinatorRequest, opts ...grpc.CallOption) (*ActivateAdditionalCoordinatorResponse, error)
	JoinCluster(ctx context.Context, in *JoinClusterRequest, opts ...grpc.CallOption) (*JoinClusterResponse, error)
	TriggerNodeUpdate(ctx context.Context, in *TriggerNodeUpdateRequest, opts ...grpc.CallOption) (*TriggerNodeUpdateResponse, error)
	TriggerCoordinatorUpdate(ctx context.Context, in *TriggerCoordinatorUpdateRequest, opts ...grpc.CallOption) (*TriggerCoordinatorUpdateResponse, error)
	GetPeerVPNPublicKey(ctx context.Context, in *GetPeerVPNPublicKeyRequest, opts ...grpc.CallOption) (*GetPeerVPNPublicKeyResponse, error)
	GetVPNPeers(ctx context.Context, in *GetVPNPeersRequest, opts ...grpc.CallOption) (*GetVPNPeersResponse, error)
	RequestStateDiskKey(ctx context.Context, in *RequestStateDiskKeyRequest, opts ...grpc.CallOption) (*RequestStateDiskKeyResponse, error)
}

type aPIClient struct {
	cc grpc.ClientConnInterface
}

func NewAPIClient(cc grpc.ClientConnInterface) APIClient {
	return &aPIClient{cc}
}

func (c *aPIClient) GetState(ctx context.Context, in *GetStateRequest, opts ...grpc.CallOption) (*GetStateResponse, error) {
	out := new(GetStateResponse)
	err := c.cc.Invoke(ctx, "/pubapi.API/GetState", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *aPIClient) ActivateAsCoordinator(ctx context.Context, in *ActivateAsCoordinatorRequest, opts ...grpc.CallOption) (API_ActivateAsCoordinatorClient, error) {
	stream, err := c.cc.NewStream(ctx, &API_ServiceDesc.Streams[0], "/pubapi.API/ActivateAsCoordinator", opts...)
	if err != nil {
		return nil, err
	}
	x := &aPIActivateAsCoordinatorClient{stream}
	if err := x.ClientStream.SendMsg(in); err != nil {
		return nil, err
	}
	if err := x.ClientStream.CloseSend(); err != nil {
		return nil, err
	}
	return x, nil
}

type API_ActivateAsCoordinatorClient interface {
	Recv() (*ActivateAsCoordinatorResponse, error)
	grpc.ClientStream
}

type aPIActivateAsCoordinatorClient struct {
	grpc.ClientStream
}

func (x *aPIActivateAsCoordinatorClient) Recv() (*ActivateAsCoordinatorResponse, error) {
	m := new(ActivateAsCoordinatorResponse)
	if err := x.ClientStream.RecvMsg(m); err != nil {
		return nil, err
	}
	return m, nil
}

func (c *aPIClient) ActivateAsNode(ctx context.Context, opts ...grpc.CallOption) (API_ActivateAsNodeClient, error) {
	stream, err := c.cc.NewStream(ctx, &API_ServiceDesc.Streams[1], "/pubapi.API/ActivateAsNode", opts...)
	if err != nil {
		return nil, err
	}
	x := &aPIActivateAsNodeClient{stream}
	return x, nil
}

type API_ActivateAsNodeClient interface {
	Send(*ActivateAsNodeRequest) error
	Recv() (*ActivateAsNodeResponse, error)
	grpc.ClientStream
}

type aPIActivateAsNodeClient struct {
	grpc.ClientStream
}

func (x *aPIActivateAsNodeClient) Send(m *ActivateAsNodeRequest) error {
	return x.ClientStream.SendMsg(m)
}

func (x *aPIActivateAsNodeClient) Recv() (*ActivateAsNodeResponse, error) {
	m := new(ActivateAsNodeResponse)
	if err := x.ClientStream.RecvMsg(m); err != nil {
		return nil, err
	}
	return m, nil
}

func (c *aPIClient) ActivateAdditionalNodes(ctx context.Context, in *ActivateAdditionalNodesRequest, opts ...grpc.CallOption) (API_ActivateAdditionalNodesClient, error) {
	stream, err := c.cc.NewStream(ctx, &API_ServiceDesc.Streams[2], "/pubapi.API/ActivateAdditionalNodes", opts...)
	if err != nil {
		return nil, err
	}
	x := &aPIActivateAdditionalNodesClient{stream}
	if err := x.ClientStream.SendMsg(in); err != nil {
		return nil, err
	}
	if err := x.ClientStream.CloseSend(); err != nil {
		return nil, err
	}
	return x, nil
}

type API_ActivateAdditionalNodesClient interface {
	Recv() (*ActivateAdditionalNodesResponse, error)
	grpc.ClientStream
}

type aPIActivateAdditionalNodesClient struct {
	grpc.ClientStream
}

func (x *aPIActivateAdditionalNodesClient) Recv() (*ActivateAdditionalNodesResponse, error) {
	m := new(ActivateAdditionalNodesResponse)
	if err := x.ClientStream.RecvMsg(m); err != nil {
		return nil, err
	}
	return m, nil
}

func (c *aPIClient) ActivateAsAdditionalCoordinator(ctx context.Context, in *ActivateAsAdditionalCoordinatorRequest, opts ...grpc.CallOption) (*ActivateAsAdditionalCoordinatorResponse, error) {
	out := new(ActivateAsAdditionalCoordinatorResponse)
	err := c.cc.Invoke(ctx, "/pubapi.API/ActivateAsAdditionalCoordinator", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *aPIClient) ActivateAdditionalCoordinator(ctx context.Context, in *ActivateAdditionalCoordinatorRequest, opts ...grpc.CallOption) (*ActivateAdditionalCoordinatorResponse, error) {
	out := new(ActivateAdditionalCoordinatorResponse)
	err := c.cc.Invoke(ctx, "/pubapi.API/ActivateAdditionalCoordinator", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *aPIClient) JoinCluster(ctx context.Context, in *JoinClusterRequest, opts ...grpc.CallOption) (*JoinClusterResponse, error) {
	out := new(JoinClusterResponse)
	err := c.cc.Invoke(ctx, "/pubapi.API/JoinCluster", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *aPIClient) TriggerNodeUpdate(ctx context.Context, in *TriggerNodeUpdateRequest, opts ...grpc.CallOption) (*TriggerNodeUpdateResponse, error) {
	out := new(TriggerNodeUpdateResponse)
	err := c.cc.Invoke(ctx, "/pubapi.API/TriggerNodeUpdate", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *aPIClient) TriggerCoordinatorUpdate(ctx context.Context, in *TriggerCoordinatorUpdateRequest, opts ...grpc.CallOption) (*TriggerCoordinatorUpdateResponse, error) {
	out := new(TriggerCoordinatorUpdateResponse)
	err := c.cc.Invoke(ctx, "/pubapi.API/TriggerCoordinatorUpdate", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *aPIClient) GetPeerVPNPublicKey(ctx context.Context, in *GetPeerVPNPublicKeyRequest, opts ...grpc.CallOption) (*GetPeerVPNPublicKeyResponse, error) {
	out := new(GetPeerVPNPublicKeyResponse)
	err := c.cc.Invoke(ctx, "/pubapi.API/GetPeerVPNPublicKey", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *aPIClient) GetVPNPeers(ctx context.Context, in *GetVPNPeersRequest, opts ...grpc.CallOption) (*GetVPNPeersResponse, error) {
	out := new(GetVPNPeersResponse)
	err := c.cc.Invoke(ctx, "/pubapi.API/GetVPNPeers", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *aPIClient) RequestStateDiskKey(ctx context.Context, in *RequestStateDiskKeyRequest, opts ...grpc.CallOption) (*RequestStateDiskKeyResponse, error) {
	out := new(RequestStateDiskKeyResponse)
	err := c.cc.Invoke(ctx, "/pubapi.API/RequestStateDiskKey", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// APIServer is the server API for API service.
// All implementations must embed UnimplementedAPIServer
// for forward compatibility
type APIServer interface {
	GetState(context.Context, *GetStateRequest) (*GetStateResponse, error)
	ActivateAsCoordinator(*ActivateAsCoordinatorRequest, API_ActivateAsCoordinatorServer) error
	ActivateAsNode(API_ActivateAsNodeServer) error
	ActivateAdditionalNodes(*ActivateAdditionalNodesRequest, API_ActivateAdditionalNodesServer) error
	ActivateAsAdditionalCoordinator(context.Context, *ActivateAsAdditionalCoordinatorRequest) (*ActivateAsAdditionalCoordinatorResponse, error)
	ActivateAdditionalCoordinator(context.Context, *ActivateAdditionalCoordinatorRequest) (*ActivateAdditionalCoordinatorResponse, error)
	JoinCluster(context.Context, *JoinClusterRequest) (*JoinClusterResponse, error)
	TriggerNodeUpdate(context.Context, *TriggerNodeUpdateRequest) (*TriggerNodeUpdateResponse, error)
	TriggerCoordinatorUpdate(context.Context, *TriggerCoordinatorUpdateRequest) (*TriggerCoordinatorUpdateResponse, error)
	GetPeerVPNPublicKey(context.Context, *GetPeerVPNPublicKeyRequest) (*GetPeerVPNPublicKeyResponse, error)
	GetVPNPeers(context.Context, *GetVPNPeersRequest) (*GetVPNPeersResponse, error)
	RequestStateDiskKey(context.Context, *RequestStateDiskKeyRequest) (*RequestStateDiskKeyResponse, error)
	mustEmbedUnimplementedAPIServer()
}

// UnimplementedAPIServer must be embedded to have forward compatible implementations.
type UnimplementedAPIServer struct {
}

func (UnimplementedAPIServer) GetState(context.Context, *GetStateRequest) (*GetStateResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method GetState not implemented")
}
func (UnimplementedAPIServer) ActivateAsCoordinator(*ActivateAsCoordinatorRequest, API_ActivateAsCoordinatorServer) error {
	return status.Errorf(codes.Unimplemented, "method ActivateAsCoordinator not implemented")
}
func (UnimplementedAPIServer) ActivateAsNode(API_ActivateAsNodeServer) error {
	return status.Errorf(codes.Unimplemented, "method ActivateAsNode not implemented")
}
func (UnimplementedAPIServer) ActivateAdditionalNodes(*ActivateAdditionalNodesRequest, API_ActivateAdditionalNodesServer) error {
	return status.Errorf(codes.Unimplemented, "method ActivateAdditionalNodes not implemented")
}
func (UnimplementedAPIServer) ActivateAsAdditionalCoordinator(context.Context, *ActivateAsAdditionalCoordinatorRequest) (*ActivateAsAdditionalCoordinatorResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method ActivateAsAdditionalCoordinator not implemented")
}
func (UnimplementedAPIServer) ActivateAdditionalCoordinator(context.Context, *ActivateAdditionalCoordinatorRequest) (*ActivateAdditionalCoordinatorResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method ActivateAdditionalCoordinator not implemented")
}
func (UnimplementedAPIServer) JoinCluster(context.Context, *JoinClusterRequest) (*JoinClusterResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method JoinCluster not implemented")
}
func (UnimplementedAPIServer) TriggerNodeUpdate(context.Context, *TriggerNodeUpdateRequest) (*TriggerNodeUpdateResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method TriggerNodeUpdate not implemented")
}
func (UnimplementedAPIServer) TriggerCoordinatorUpdate(context.Context, *TriggerCoordinatorUpdateRequest) (*TriggerCoordinatorUpdateResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method TriggerCoordinatorUpdate not implemented")
}
func (UnimplementedAPIServer) GetPeerVPNPublicKey(context.Context, *GetPeerVPNPublicKeyRequest) (*GetPeerVPNPublicKeyResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method GetPeerVPNPublicKey not implemented")
}
func (UnimplementedAPIServer) GetVPNPeers(context.Context, *GetVPNPeersRequest) (*GetVPNPeersResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method GetVPNPeers not implemented")
}
func (UnimplementedAPIServer) RequestStateDiskKey(context.Context, *RequestStateDiskKeyRequest) (*RequestStateDiskKeyResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method RequestStateDiskKey not implemented")
}
func (UnimplementedAPIServer) mustEmbedUnimplementedAPIServer() {}

// UnsafeAPIServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to APIServer will
|
||||
// result in compilation errors.
|
||||
type UnsafeAPIServer interface {
|
||||
mustEmbedUnimplementedAPIServer()
|
||||
}
|
||||
|
||||
func RegisterAPIServer(s grpc.ServiceRegistrar, srv APIServer) {
|
||||
s.RegisterService(&API_ServiceDesc, srv)
|
||||
}
|
||||
|
||||
func _API_GetState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(GetStateRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(APIServer).GetState(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/pubapi.API/GetState",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(APIServer).GetState(ctx, req.(*GetStateRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _API_ActivateAsCoordinator_Handler(srv interface{}, stream grpc.ServerStream) error {
|
||||
m := new(ActivateAsCoordinatorRequest)
|
||||
if err := stream.RecvMsg(m); err != nil {
|
||||
return err
|
||||
}
|
||||
return srv.(APIServer).ActivateAsCoordinator(m, &aPIActivateAsCoordinatorServer{stream})
|
||||
}
|
||||
|
||||
type API_ActivateAsCoordinatorServer interface {
|
||||
Send(*ActivateAsCoordinatorResponse) error
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
type aPIActivateAsCoordinatorServer struct {
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
func (x *aPIActivateAsCoordinatorServer) Send(m *ActivateAsCoordinatorResponse) error {
|
||||
return x.ServerStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func _API_ActivateAsNode_Handler(srv interface{}, stream grpc.ServerStream) error {
|
||||
return srv.(APIServer).ActivateAsNode(&aPIActivateAsNodeServer{stream})
|
||||
}
|
||||
|
||||
type API_ActivateAsNodeServer interface {
|
||||
Send(*ActivateAsNodeResponse) error
|
||||
Recv() (*ActivateAsNodeRequest, error)
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
type aPIActivateAsNodeServer struct {
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
func (x *aPIActivateAsNodeServer) Send(m *ActivateAsNodeResponse) error {
|
||||
return x.ServerStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func (x *aPIActivateAsNodeServer) Recv() (*ActivateAsNodeRequest, error) {
|
||||
m := new(ActivateAsNodeRequest)
|
||||
if err := x.ServerStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func _API_ActivateAdditionalNodes_Handler(srv interface{}, stream grpc.ServerStream) error {
|
||||
m := new(ActivateAdditionalNodesRequest)
|
||||
if err := stream.RecvMsg(m); err != nil {
|
||||
return err
|
||||
}
|
||||
return srv.(APIServer).ActivateAdditionalNodes(m, &aPIActivateAdditionalNodesServer{stream})
|
||||
}
|
||||
|
||||
type API_ActivateAdditionalNodesServer interface {
|
||||
Send(*ActivateAdditionalNodesResponse) error
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
type aPIActivateAdditionalNodesServer struct {
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
func (x *aPIActivateAdditionalNodesServer) Send(m *ActivateAdditionalNodesResponse) error {
|
||||
return x.ServerStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func _API_ActivateAsAdditionalCoordinator_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(ActivateAsAdditionalCoordinatorRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(APIServer).ActivateAsAdditionalCoordinator(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/pubapi.API/ActivateAsAdditionalCoordinator",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(APIServer).ActivateAsAdditionalCoordinator(ctx, req.(*ActivateAsAdditionalCoordinatorRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _API_ActivateAdditionalCoordinator_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(ActivateAdditionalCoordinatorRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(APIServer).ActivateAdditionalCoordinator(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/pubapi.API/ActivateAdditionalCoordinator",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(APIServer).ActivateAdditionalCoordinator(ctx, req.(*ActivateAdditionalCoordinatorRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _API_JoinCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(JoinClusterRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(APIServer).JoinCluster(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/pubapi.API/JoinCluster",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(APIServer).JoinCluster(ctx, req.(*JoinClusterRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _API_TriggerNodeUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(TriggerNodeUpdateRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(APIServer).TriggerNodeUpdate(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/pubapi.API/TriggerNodeUpdate",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(APIServer).TriggerNodeUpdate(ctx, req.(*TriggerNodeUpdateRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _API_TriggerCoordinatorUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(TriggerCoordinatorUpdateRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(APIServer).TriggerCoordinatorUpdate(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/pubapi.API/TriggerCoordinatorUpdate",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(APIServer).TriggerCoordinatorUpdate(ctx, req.(*TriggerCoordinatorUpdateRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _API_GetPeerVPNPublicKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(GetPeerVPNPublicKeyRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(APIServer).GetPeerVPNPublicKey(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/pubapi.API/GetPeerVPNPublicKey",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(APIServer).GetPeerVPNPublicKey(ctx, req.(*GetPeerVPNPublicKeyRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _API_GetVPNPeers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(GetVPNPeersRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(APIServer).GetVPNPeers(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/pubapi.API/GetVPNPeers",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(APIServer).GetVPNPeers(ctx, req.(*GetVPNPeersRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _API_RequestStateDiskKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(RequestStateDiskKeyRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(APIServer).RequestStateDiskKey(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/pubapi.API/RequestStateDiskKey",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(APIServer).RequestStateDiskKey(ctx, req.(*RequestStateDiskKeyRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
// API_ServiceDesc is the grpc.ServiceDesc for API service.
|
||||
// It's only intended for direct use with grpc.RegisterService,
|
||||
// and not to be introspected or modified (even as a copy)
|
||||
var API_ServiceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "pubapi.API",
|
||||
HandlerType: (*APIServer)(nil),
|
||||
Methods: []grpc.MethodDesc{
|
||||
{
|
||||
MethodName: "GetState",
|
||||
Handler: _API_GetState_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "ActivateAsAdditionalCoordinator",
|
||||
Handler: _API_ActivateAsAdditionalCoordinator_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "ActivateAdditionalCoordinator",
|
||||
Handler: _API_ActivateAdditionalCoordinator_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "JoinCluster",
|
||||
Handler: _API_JoinCluster_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "TriggerNodeUpdate",
|
||||
Handler: _API_TriggerNodeUpdate_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "TriggerCoordinatorUpdate",
|
||||
Handler: _API_TriggerCoordinatorUpdate_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "GetPeerVPNPublicKey",
|
||||
Handler: _API_GetPeerVPNPublicKey_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "GetVPNPeers",
|
||||
Handler: _API_GetVPNPeers_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "RequestStateDiskKey",
|
||||
Handler: _API_RequestStateDiskKey_Handler,
|
||||
},
|
||||
},
|
||||
Streams: []grpc.StreamDesc{
|
||||
{
|
||||
StreamName: "ActivateAsCoordinator",
|
||||
Handler: _API_ActivateAsCoordinator_Handler,
|
||||
ServerStreams: true,
|
||||
},
|
||||
{
|
||||
StreamName: "ActivateAsNode",
|
||||
Handler: _API_ActivateAsNode_Handler,
|
||||
ServerStreams: true,
|
||||
ClientStreams: true,
|
||||
},
|
||||
{
|
||||
StreamName: "ActivateAdditionalNodes",
|
||||
Handler: _API_ActivateAdditionalNodes_Handler,
|
||||
ServerStreams: true,
|
||||
},
|
||||
},
|
||||
Metadata: "pubapi.proto",
|
||||
}
|
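For orientation, a minimal sketch of how the generated bindings above would be wired into a server. The coordinatorAPI type and the listen address are assumptions for illustration; only RegisterAPIServer and UnimplementedAPIServer come from the generated code.

package main

import (
    "net"

    "github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
    "google.golang.org/grpc"
)

// coordinatorAPI is a hypothetical implementation; embedding
// UnimplementedAPIServer keeps it forward compatible.
type coordinatorAPI struct {
    pubproto.UnimplementedAPIServer
}

func main() {
    lis, err := net.Listen("tcp", ":9000") // port is an assumption
    if err != nil {
        panic(err)
    }
    s := grpc.NewServer()
    pubproto.RegisterAPIServer(s, &coordinatorAPI{})
    _ = s.Serve(lis)
}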
@ -1,22 +0,0 @@
package pubapi

import (
    "context"

    "github.com/edgelesssys/constellation/coordinator/peer"
    "github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
)

// GetVPNPeers retrieves VPN peers from a coordinator.
func (a *API) GetVPNPeers(context.Context, *pubproto.GetVPNPeersRequest) (*pubproto.GetVPNPeersResponse, error) {
    _, peers, err := a.core.GetPeers(0)
    if err != nil {
        return nil, status.Errorf(codes.Internal, "getting peers: %v", err)
    }

    return &pubproto.GetVPNPeersResponse{
        Peers: peer.ToPubProto(peers),
    }, nil
}
@ -1,58 +0,0 @@
package pubapi

import (
    "context"
    "errors"
    "testing"

    "github.com/edgelesssys/constellation/coordinator/logging"
    "github.com/edgelesssys/constellation/coordinator/peer"
    "github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
    "github.com/edgelesssys/constellation/coordinator/role"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "go.uber.org/zap/zaptest"
)

func TestGetVPNPeers(t *testing.T) {
    wantedPeers := []peer.Peer{
        {
            PublicIP:  "192.0.2.1",
            VPNIP:     "10.118.0.1",
            VPNPubKey: []byte{0x1, 0x2, 0x3},
            Role:      role.Coordinator,
        },
    }

    testCases := map[string]struct {
        coreGetPeersErr error
        wantErr         bool
    }{
        "GetVPNPeers works": {},
        "GetVPNPeers fails if core cannot retrieve VPN peers": {
            coreGetPeersErr: errors.New("failed to get peers"),
            wantErr:         true,
        },
    }

    for name, tc := range testCases {
        t.Run(name, func(t *testing.T) {
            assert := assert.New(t)
            require := require.New(t)

            logger := zaptest.NewLogger(t)
            cor := &fakeCore{peers: wantedPeers, GetPeersErr: tc.coreGetPeersErr}
            api := New(logger, &logging.NopLogger{}, cor, nil, nil, nil, nil)
            defer api.Close()
            resp, err := api.GetVPNPeers(context.Background(), &pubproto.GetVPNPeersRequest{})
            if tc.wantErr {
                assert.Error(err)
                return
            }
            require.NoError(err)

            peers := peer.FromPubProto(resp.Peers)
            assert.Equal(wantedPeers, peers)
        })
    }
}
@ -1,51 +0,0 @@
package state

import (
    "fmt"
    "sync/atomic"
)

//go:generate stringer -type=State

// State is a peer's state.
//
// State's methods are thread safe. Get the State's value using the Get method if
// you're accessing it concurrently. Otherwise, you may access it directly.
type State uint32

const (
    Uninitialized State = iota
    AcceptingInit
    ActivatingNodes
    NodeWaitingForClusterJoin
    IsNode
    Failed
    maxState
)

// State's methods should be thread safe. As we only need to protect
// one primitive value, we can use atomic operations.

// Get gets the state in a thread-safe manner.
func (s *State) Get() State {
    return State(atomic.LoadUint32((*uint32)(s)))
}

// Require checks if the state is one of the desired ones and returns an error otherwise.
func (s *State) Require(states ...State) error {
    this := s.Get()
    for _, st := range states {
        if st == this {
            return nil
        }
    }
    return fmt.Errorf("server is not in expected state: require one of %v, but this is %v", states, this)
}

// Advance advances the state.
func (s *State) Advance(newState State) {
    curState := State(atomic.SwapUint32((*uint32)(s), uint32(newState)))
    if !(curState < newState && newState < maxState) {
        panic(fmt.Errorf("cannot advance from %v to %v", curState, newState))
    }
}
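A minimal usage sketch of the atomic state machine above; the chosen states and the handler context are illustrative.

var st state.State // zero value is Uninitialized

// Only strictly forward transitions are allowed; anything else panics.
st.Advance(state.AcceptingInit)

// Handlers can gate on the current state before doing work.
if err := st.Require(state.AcceptingInit, state.ActivatingNodes); err != nil {
    // reject the request: peer is in the wrong phase
}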
@ -1,29 +0,0 @@
// Code generated by "stringer -type=State"; DO NOT EDIT.

package state

import "strconv"

func _() {
    // An "invalid array index" compiler error signifies that the constant values have changed.
    // Re-run the stringer command to generate them again.
    var x [1]struct{}
    _ = x[Uninitialized-0]
    _ = x[AcceptingInit-1]
    _ = x[ActivatingNodes-2]
    _ = x[NodeWaitingForClusterJoin-3]
    _ = x[IsNode-4]
    _ = x[Failed-5]
    _ = x[maxState-6]
}

const _State_name = "UninitializedAcceptingInitActivatingNodesNodeWaitingForClusterJoinIsNodeFailedmaxState"

var _State_index = [...]uint8{0, 13, 26, 41, 66, 72, 78, 86}

func (i State) String() string {
    if i >= State(len(_State_index)-1) {
        return "State(" + strconv.FormatInt(int64(i), 10) + ")"
    }
    return _State_name[_State_index[i]:_State_index[i+1]]
}
@ -1,30 +0,0 @@
package state

import (
    "testing"

    "github.com/stretchr/testify/assert"
    "go.uber.org/goleak"
)

func TestMain(m *testing.M) {
    goleak.VerifyTestMain(m)
}

func TestState(t *testing.T) {
    assert := assert.New(t)

    var st State
    assert.Equal(Uninitialized, st)
    assert.Equal(Uninitialized, st.Get())
    assert.NoError(st.Require(Uninitialized))
    assert.Error(st.Require(AcceptingInit))

    st.Advance(AcceptingInit)
    assert.Equal(AcceptingInit, st)
    assert.Equal(AcceptingInit, st.Get())
    assert.Error(st.Require(Uninitialized))
    assert.NoError(st.Require(AcceptingInit))

    assert.Panics(func() { st.Advance(Uninitialized) })
}
@ -1,280 +0,0 @@
package store

import (
    "context"
    "crypto/tls"
    "crypto/x509"
    "fmt"
    "os"
    "strings"
    "sync"
    "time"

    clientv3 "go.etcd.io/etcd/client/v3"
    "go.etcd.io/etcd/client/v3/concurrency"
    "go.uber.org/zap"
    "google.golang.org/grpc"
)

// TODO: Generate certificates for the coordinator.
const (
    peerCertFilepath = "/etc/kubernetes/pki/etcd/peer.crt"
    keyFilepath      = "/etc/kubernetes/pki/etcd/peer.key"
    caCertFilepath   = "/etc/kubernetes/pki/etcd/server.crt"
    etcdPrefix       = "constellationRegion"
    dialTimeout      = 60 * time.Second
)

// EtcdStore is a Store backed by an etcd cluster.
type EtcdStore struct {
    client *clientv3.Client
}

// NewEtcdStore creates a new EtcdStore. It blocks until the connection to etcd is up.
func NewEtcdStore(endpoint string, forceTLS bool, logger *zap.Logger) (*EtcdStore, error) {
    var tlsConfig *tls.Config
    if forceTLS {
        caCert, err := os.ReadFile(caCertFilepath)
        if err != nil {
            return nil, err
        }
        caCertPool := x509.NewCertPool()
        caCertPool.AppendCertsFromPEM(caCert)

        cert, err := tls.LoadX509KeyPair(peerCertFilepath, keyFilepath)
        if err != nil {
            return nil, err
        }

        tlsConfig = &tls.Config{
            Certificates: []tls.Certificate{cert},
            RootCAs:      caCertPool,
            MinVersion:   tls.VersionTLS12,
        }
    }

    // Blocks until the connection is up.
    cli, err := clientv3.New(clientv3.Config{
        DialTimeout: dialTimeout,
        Endpoints:   []string{endpoint},
        TLS:         tlsConfig,
        DialOptions: []grpc.DialOption{grpc.WithBlock()},
        Logger:      logger,
    })
    if err != nil {
        return nil, err
    }

    return &EtcdStore{client: cli}, nil
}

// Get retrieves a value from EtcdStore by key.
func (s *EtcdStore) Get(request string) ([]byte, error) {
    values, err := s.client.Get(context.TODO(), etcdPrefix+request)
    if err != nil {
        return nil, err
    }
    if values.Count == 0 {
        return nil, &ValueUnsetError{requestedValue: request}
    }
    if values.Count == 1 {
        return values.Kvs[0].Value, nil
    }
    return nil, fmt.Errorf("got multiple entries for key [%s] in etcd", request)
}

// Put saves a value in EtcdStore by key.
func (s *EtcdStore) Put(request string, requestData []byte) error {
    _, err := s.client.Put(context.TODO(), etcdPrefix+request, string(requestData))
    return err
}

// Iterator returns an Iterator for a given prefix.
func (s *EtcdStore) Iterator(prefix string) (Iterator, error) {
    resp, err := s.client.Get(context.TODO(), etcdPrefix+prefix, clientv3.WithPrefix(), clientv3.WithKeysOnly())
    if err != nil {
        return nil, err
    }
    keys := make([]string, 0, len(resp.Kvs))
    for _, kv := range resp.Kvs {
        key := strings.TrimPrefix(string(kv.Key), etcdPrefix)
        keys = append(keys, key)
    }
    output := &EtcdIterator{keys: keys}
    return output, nil
}

// Transfer copies the whole store database.
// TODO: Implement this function; it is currently never called.
func (s *EtcdStore) Transfer(store Store) error {
    panic("etcd store Transfer() function not implemented, should never be called")
}

// Delete deletes the store entry with the given key.
func (s *EtcdStore) Delete(key string) error {
    _, err := s.client.Delete(context.TODO(), etcdPrefix+key)
    return err
}

// BeginTransaction starts a new transaction. It acquires a distributed lock so
// that only one transaction can be active at a time.
func (s *EtcdStore) BeginTransaction() (Transaction, error) {
    sess, err := concurrency.NewSession(s.client)
    if err != nil {
        return nil, err
    }
    mut := concurrency.NewLocker(sess, etcdPrefix)
    mut.Lock()

    return &EtcdTransaction{
        store:              s,
        dataInsert:         map[string][]byte{},
        dataDelete:         map[string]struct{}{},
        ongoingTransaction: true,
        mut:                mut,
        session:            sess,
    }, nil
}

// Close closes the etcd client.
func (s *EtcdStore) Close() error {
    return s.client.Close()
}

// EtcdTransaction is a transaction on an EtcdStore. It buffers puts and deletes
// until Commit applies them in a single etcd transaction.
type EtcdTransaction struct {
    store              *EtcdStore
    dataInsert         map[string][]byte
    dataDelete         map[string]struct{}
    ongoingTransaction bool
    session            *concurrency.Session
    mut                sync.Locker
}

// Get retrieves a value, observing puts and deletes buffered in this transaction.
func (t *EtcdTransaction) Get(request string) ([]byte, error) {
    if !t.ongoingTransaction {
        return nil, &TransactionAlreadyCommittedError{op: "Get"}
    }
    if value, ok := t.dataInsert[request]; ok {
        return value, nil
    }
    if _, ok := t.dataDelete[request]; ok {
        return nil, &ValueUnsetError{requestedValue: request}
    }
    return t.store.Get(request)
}

// Put saves a value.
func (t *EtcdTransaction) Put(request string, requestData []byte) error {
    if !t.ongoingTransaction {
        return &TransactionAlreadyCommittedError{op: "Put"}
    }
    t.dataInsert[request] = requestData
    return nil
}

// Delete deletes the key if it exists. Only errors if there is no ongoing transaction.
func (t *EtcdTransaction) Delete(key string) error {
    if !t.ongoingTransaction {
        return &TransactionAlreadyCommittedError{op: "Delete"}
    }
    delete(t.dataInsert, key)
    t.dataDelete[key] = struct{}{}
    return nil
}

// Iterator returns an iterator for all keys in the transaction with a given prefix.
func (t *EtcdTransaction) Iterator(prefix string) (Iterator, error) {
    resp, err := t.store.client.Get(context.TODO(), etcdPrefix+prefix, clientv3.WithPrefix())
    if err != nil {
        return nil, err
    }
    var keys []string
    for _, v := range resp.Kvs {
        key := strings.TrimPrefix(string(v.Key), etcdPrefix)
        if _, ok := t.dataDelete[key]; !ok {
            keys = append(keys, key)
        }
    }
    for k := range t.dataInsert {
        if strings.HasPrefix(k, prefix) {
            keys = append(keys, k)
        }
    }
    output := &EtcdIterator{idx: 0, keys: keys}
    return output, err
}

// Commit ends a transaction and persists the changes.
func (t *EtcdTransaction) Commit() error {
    if !t.ongoingTransaction {
        return fmt.Errorf("no ongoing transaction")
    }

    ops := make([]clientv3.Op, 0, len(t.dataInsert)+len(t.dataDelete))
    // Gather all buffered operations into one transaction object. Future
    // implementations could also delete elements atomically; however, that is
    // currently not compatible with stdstore.
    for k, v := range t.dataInsert {
        ops = append(ops, clientv3.OpPut(etcdPrefix+k, string(v)))
    }
    for k := range t.dataDelete {
        // Each key is only allowed to occur once per transaction.
        if _, ok := t.dataInsert[k]; !ok {
            ops = append(ops, clientv3.OpDelete(etcdPrefix+k))
        }
    }
    // Single etcd transaction, so either everything gets applied or nothing.
    _, err := t.store.client.Txn(context.TODO()).Then(ops...).Commit()
    if err != nil {
        return err
    }
    t.session.Close()
    t.mut.Unlock()
    t.ongoingTransaction = false
    return nil
}

// Rollback aborts a transaction.
func (t *EtcdTransaction) Rollback() {
    if t.ongoingTransaction {
        t.session.Close()
        t.mut.Unlock()
    }
    t.ongoingTransaction = false
}

// EtcdIterator is an Iterator over a fixed set of keys.
type EtcdIterator struct {
    idx  int
    keys []string
}

// GetNext returns the next element of the iterator.
func (i *EtcdIterator) GetNext() (string, error) {
    if i.idx >= len(i.keys) {
        return "", &NoElementsLeftError{idx: i.idx}
    }
    key := i.keys[i.idx]
    i.idx++
    return key, nil
}

// HasNext returns true if there are elements left to get with GetNext().
func (i *EtcdIterator) HasNext() bool {
    return i.idx < len(i.keys)
}

// EtcdStoreFactory is a factory to create EtcdStores.
type EtcdStoreFactory struct {
    Endpoint string
    ForceTLS bool
    Logger   *zap.Logger
}

// NewEtcdStoreFactory creates a new EtcdStoreFactory.
func NewEtcdStoreFactory(endpoint string, forceTLS bool, logger *zap.Logger) *EtcdStoreFactory {
    return &EtcdStoreFactory{
        Endpoint: endpoint,
        ForceTLS: forceTLS,
        Logger:   logger.WithOptions(zap.IncreaseLevel(zap.WarnLevel)).Named("etcd"),
    }
}

// New creates a new EtcdStore.
func (f *EtcdStoreFactory) New() (Store, error) {
    return NewEtcdStore(f.Endpoint, f.ForceTLS, f.Logger)
}
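A short sketch of constructing the store above; the endpoint and the no-op logger are placeholders, and TLS is disabled as in the integration test below.

logger := zap.NewNop() // placeholder; production code would pass a real logger
st, err := store.NewEtcdStore("127.0.0.1:2379", false, logger) // endpoint is an assumption
if err != nil {
    // clientv3.New blocked until dialTimeout without reaching etcd
}
defer st.Close()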
@ -1,92 +0,0 @@
//go:build integration

package store

import (
    "context"
    "io"
    "net"
    "os"
    "testing"

    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/container"
    "github.com/docker/docker/client"
    "github.com/stretchr/testify/require"
)

const (
    etcdImageName = "bitnami/etcd:3.5.2"
)

func TestEtcdStore(t *testing.T) {
    require := require.New(t)

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    dockerClient, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
    require.NoError(err)
    defer dockerClient.Close()

    pullReader, err := dockerClient.ImagePull(ctx, etcdImageName, types.ImagePullOptions{})
    require.NoError(err)
    _, err = io.Copy(os.Stdout, pullReader)
    require.NoError(err)
    require.NoError(pullReader.Close())

    etcdHostConfig := &container.HostConfig{AutoRemove: true}
    etcdContainerConfig := &container.Config{
        Image: etcdImageName,
        Env: []string{
            "ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379",
            "ETCD_ADVERTISE_CLIENT_URLS=http://127.0.0.1:2379",
            "ETCD_LOG_LEVEL=debug",
            "ETCD_DATA_DIR=/bitnami/etcd/data",
        },
        Entrypoint:   []string{"/opt/bitnami/etcd/bin/etcd"},
        AttachStdout: true, // necessary to attach to the container log
        AttachStderr: true, // necessary to attach to the container log
        Tty:          true, // necessary to attach to the container log
    }

    t.Log("create etcd container...")
    createResp, err := dockerClient.ContainerCreate(ctx, etcdContainerConfig, etcdHostConfig, nil, nil, "etcd-storage-unittest")
    require.NoError(err)
    require.NoError(dockerClient.ContainerStart(ctx, createResp.ID, types.ContainerStartOptions{}))

    logReader, err := dockerClient.ContainerLogs(ctx, createResp.ID, types.ContainerLogsOptions{ShowStdout: true, Follow: true})
    require.NoError(err)
    go io.Copy(os.Stdout, logReader)

    containerData, err := dockerClient.ContainerInspect(ctx, createResp.ID)
    require.NoError(err)
    t.Logf("etcd Docker IP-Addr %v", containerData.NetworkSettings.IPAddress)

    //
    // Run the store test.
    //
    store, err := NewEtcdStore(net.JoinHostPort(containerData.NetworkSettings.IPAddress, "2379"), false, nil)
    require.NoError(err)
    defer store.Close()

    // TODO: since the etcd store does network I/O, it should be canceled with a timeout.
    testStore(t, func() (Store, error) {
        clearStore(require, store)
        return store, nil
    })

    // Usually this would be deferred. However, a defer causes problems with the construct above.
    require.NoError(dockerClient.ContainerStop(ctx, createResp.ID, nil))
}

func clearStore(require *require.Assertions, store Store) {
    iter, err := store.Iterator("")
    require.NoError(err)
    for iter.HasNext() {
        key, err := iter.GetNext()
        require.NoError(err)
        err = store.Delete(key)
        require.NoError(err)
    }
}
@ -1,197 +0,0 @@
package store

import (
    "strings"
    "sync"
)

// StdStore is the standard implementation of the Store interface.
type StdStore struct {
    data       map[string]string
    mut, txmut sync.Mutex
}

// NewStdStore creates and initializes a new StdStore object.
func NewStdStore() *StdStore {
    s := &StdStore{
        data: make(map[string]string),
    }

    return s
}

// Get retrieves a value from StdStore by key.
func (s *StdStore) Get(request string) ([]byte, error) {
    s.mut.Lock()
    value, ok := s.data[request]
    s.mut.Unlock()

    if ok {
        return []byte(value), nil
    }
    return nil, &ValueUnsetError{requestedValue: request}
}

// Put saves a value in StdStore by key.
func (s *StdStore) Put(request string, requestData []byte) error {
    tx, err := s.BeginTransaction()
    if err != nil {
        return err
    }
    defer tx.Rollback()
    if err := tx.Put(request, requestData); err != nil {
        return err
    }
    return tx.Commit()
}

// Delete deletes the key.
func (s *StdStore) Delete(key string) error {
    tx, err := s.BeginTransaction()
    if err != nil {
        return err
    }
    defer tx.Rollback()
    if err := tx.Delete(key); err != nil {
        return err
    }
    return tx.Commit()
}

// Iterator returns an iterator for keys saved in StdStore with a given prefix.
// For an empty prefix this is an iterator for all keys in StdStore.
func (s *StdStore) Iterator(prefix string) (Iterator, error) {
    keys := make([]string, 0)
    s.mut.Lock()
    for k := range s.data {
        if strings.HasPrefix(k, prefix) {
            keys = append(keys, k)
        }
    }
    s.mut.Unlock()

    return &StdIterator{0, keys}, nil
}

// BeginTransaction starts a new transaction.
func (s *StdStore) BeginTransaction() (Transaction, error) {
    tx := stdTransaction{
        store:              s,
        data:               map[string]string{},
        ongoingTransaction: true,
    }
    s.txmut.Lock()

    s.mut.Lock()
    for k, v := range s.data {
        tx.data[k] = v
    }
    s.mut.Unlock()

    return &tx, nil
}

func (s *StdStore) commit(data map[string]string) error {
    s.mut.Lock()
    s.data = data
    s.mut.Unlock()

    s.txmut.Unlock()

    return nil
}

// Transfer copies all key:value pairs from this store into the new store.
func (s *StdStore) Transfer(newstore Store) error {
    s.mut.Lock()
    defer s.mut.Unlock()
    for key, value := range s.data {
        if err := newstore.Put(key, []byte(value)); err != nil {
            return err
        }
    }
    return nil
}

// stdTransaction operates on a copy of the store's data and swaps it in on Commit.
type stdTransaction struct {
    store              *StdStore
    data               map[string]string
    ongoingTransaction bool
}

// Get retrieves a value.
func (t *stdTransaction) Get(request string) ([]byte, error) {
    if !t.ongoingTransaction {
        return nil, &TransactionAlreadyCommittedError{op: "Get"}
    }
    if value, ok := t.data[request]; ok {
        return []byte(value), nil
    }
    return nil, &ValueUnsetError{requestedValue: request}
}

// Put saves a value.
func (t *stdTransaction) Put(request string, requestData []byte) error {
    if !t.ongoingTransaction {
        return &TransactionAlreadyCommittedError{op: "Put"}
    }
    t.data[request] = string(requestData)
    return nil
}

// Delete deletes the key.
func (t *stdTransaction) Delete(key string) error {
    if !t.ongoingTransaction {
        return &TransactionAlreadyCommittedError{op: "Delete"}
    }
    delete(t.data, key)
    return nil
}

// Iterator returns an iterator for all keys in the transaction with a given prefix.
func (t *stdTransaction) Iterator(prefix string) (Iterator, error) {
    keys := make([]string, 0)
    for k := range t.data {
        if strings.HasPrefix(k, prefix) {
            keys = append(keys, k)
        }
    }

    return &StdIterator{0, keys}, nil
}

// Commit ends a transaction and persists the changes.
func (t *stdTransaction) Commit() error {
    if err := t.store.commit(t.data); err != nil {
        return err
    }
    t.store = nil
    t.ongoingTransaction = false
    return nil
}

// Rollback aborts a transaction. Noop if already committed.
func (t *stdTransaction) Rollback() {
    if t.store != nil {
        t.store.txmut.Unlock()
    }
}

// StdIterator is the standard Iterator implementation.
type StdIterator struct {
    idx  int
    keys []string
}

// GetNext returns the next element of the iterator.
func (i *StdIterator) GetNext() (string, error) {
    if i.idx >= len(i.keys) {
        return "", &NoElementsLeftError{idx: i.idx}
    }
    key := i.keys[i.idx]
    i.idx++
    return key, nil
}

// HasNext returns true if there are elements left to get with GetNext().
func (i *StdIterator) HasNext() bool {
    return i.idx < len(i.keys)
}
@ -1,9 +0,0 @@
package store

import "testing"

func TestStdStore(t *testing.T) {
    testStore(t, func() (Store, error) {
        return NewStdStore(), nil
    })
}
@ -1,76 +0,0 @@
package store

import (
    "fmt"
)

// Store is the interface for persistence.
type Store interface {
    // Get returns a value from store by key.
    Get(string) ([]byte, error)
    // Put saves a value to store by key.
    Put(string, []byte) error
    // Delete deletes the key.
    Delete(string) error
    // Iterator returns an Iterator for a given prefix.
    Iterator(string) (Iterator, error)
    // BeginTransaction starts a new transaction.
    BeginTransaction() (Transaction, error)
    // Transfer copies the whole store database.
    Transfer(Store) error
}

// Transaction is a Store transaction.
type Transaction interface {
    // Get returns a value from store by key.
    Get(string) ([]byte, error)
    // Put saves a value to store by key.
    Put(string, []byte) error
    // Delete deletes the key.
    Delete(string) error
    // Iterator returns an Iterator for a given prefix.
    Iterator(string) (Iterator, error)
    // Commit ends a transaction and persists the changes.
    Commit() error
    // Rollback aborts a transaction. Noop if already committed.
    Rollback()
}

// Iterator is an iterator for the store.
type Iterator interface {
    // GetNext returns the next element of the iterator.
    GetNext() (string, error)
    // HasNext returns true if there are elements left to get with GetNext().
    HasNext() bool
}

// ValueUnsetError is an error raised by unset values in the store.
type ValueUnsetError struct {
    requestedValue string
}

// Error implements the error interface.
func (s *ValueUnsetError) Error() string {
    return fmt.Sprintf("store: requested value not set: %s", s.requestedValue)
}

// NoElementsLeftError occurs when trying to get an element from an iterator that
// doesn't have elements left.
type NoElementsLeftError struct {
    idx int
}

// Error implements the error interface.
func (n *NoElementsLeftError) Error() string {
    return fmt.Sprintf("index out of range [%d]", n.idx)
}

// TransactionAlreadyCommittedError occurs when Get, Put, Delete, or Iterator
// is called on an already committed transaction.
type TransactionAlreadyCommittedError struct {
    op string
}

// Error implements the error interface.
func (t *TransactionAlreadyCommittedError) Error() string {
    return fmt.Sprintf("transaction is already committed, but %s is called", t.op)
}
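A minimal sketch of the intended transaction pattern against the interfaces above; it mirrors how StdStore.Put and StdStore.Delete are built on BeginTransaction. The helper name is an assumption.

func putAtomically(s store.Store, key string, value []byte) error {
    tx, err := s.BeginTransaction()
    if err != nil {
        return err
    }
    defer tx.Rollback() // noop once Commit has succeeded
    if err := tx.Put(key, value); err != nil {
        return err
    }
    return tx.Commit()
}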
@ -1,532 +0,0 @@
package store

import (
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "go.uber.org/goleak"
)

func TestMain(m *testing.M) {
    goleak.VerifyTestMain(m)
}

var newStore func() (Store, error)

func testStore(t *testing.T, storeFactory func() (Store, error)) {
    newStore = storeFactory

    t.Run("NewStore", testNewStore)
    t.Run("NewStoreIsEmpty", testNewStoreIsEmpty)
    t.Run("NewStoreClearsStore", testNewStoreClearsStore)
    t.Run("Put", testPut)
    t.Run("PutTwice", testPutTwice)
    t.Run("Get", testGet)
    t.Run("GetNonExisting", testGetNonExisting)
    t.Run("Delete", testDelete)
    t.Run("DeleteNonExisting", testDeleteNonExisting)
    t.Run("Iterator", testIterator)
    t.Run("IteratorSingleKey", testIteratorSingleKey)
    t.Run("IteratorNoValues", testIteratorNoValues)
    t.Run("IteratorRace", testIteratorRace)
    t.Run("Transaction", testTransaction)
    t.Run("TransactionInternalChangesVisible", testTransactionInternalChangesVisible)
    t.Run("TransactionInternalChangesNotVisibleOutside", testTransactionInternalChangesNotVisibleOutside)
    t.Run("TransactionNoop", testTransactionNoop)
    t.Run("TransactionDeleteThenPut", testTransactionDeleteThenPut)
    t.Run("TransactionDelete", testTransactionDelete)
    t.Run("TransactionIterator", testTransactionIterator)
    t.Run("TransactionIterateNotSeeDeleted", testTransactionIterateNotSeeDeleted)
    t.Run("TransactionGetAfterCommit", testTransactionGetAfterCommit)
    t.Run("TransactionPutAfterCommit", testTransactionPutAfterCommit)
    t.Run("TransactionDeleteAfterCommit", testTransactionDeleteAfterCommit)
    t.Run("RollbackPut", testRollbackPut)
    t.Run("RollbackDelete", testRollbackDelete)
    t.Run("Concurrency", testConcurrency)
    t.Run("StoreByValue", testStoreByValue)
    t.Run("IndependentTest", testIndependentTest)
    t.Run("IndependentTestReader", testIndependentTestReader)
}

func testNewStore(t *testing.T) {
    require := require.New(t)

    _, err := newStore()

    require.NoError(err)
}

func testNewStoreIsEmpty(t *testing.T) {
    require := require.New(t)

    store, err := newStore()
    require.NoError(err)

    iter, err := store.Iterator("")
    require.NoError(err)
    require.False(iter.HasNext())
}

func testNewStoreClearsStore(t *testing.T) {
    require := require.New(t)
    store, err := newStore()
    require.NoError(err)
    require.NoError(store.Put("key", []byte("value")))

    store, err = newStore()
    require.NoError(err)

    iter, err := store.Iterator("")
    require.NoError(err)
    require.False(iter.HasNext())
}

func testPut(t *testing.T) {
    require := require.New(t)
    store, err := newStore()
    require.NoError(err)

    err = store.Put("key", []byte("value"))

    require.NoError(err)
}

func testPutTwice(t *testing.T) {
    require := require.New(t)
    store, err := newStore()
    require.NoError(err)
    err = store.Put("key", []byte("value"))
    require.NoError(err)

    err = store.Put("key", []byte("newValue"))
    require.NoError(err)

    fetchedValue, err := store.Get("key")
    require.NoError(err)
    require.Equal([]byte("newValue"), fetchedValue)
}

func testGet(t *testing.T) {
    require := require.New(t)

    store, err := newStore()
    require.NoError(err)
    err = store.Put("key", []byte("value"))
    require.NoError(err)

    fetchedValue, err := store.Get("key")

    require.NoError(err)
    require.Equal([]byte("value"), fetchedValue)
}

func testGetNonExisting(t *testing.T) {
    require := require.New(t)

    store, err := newStore()
    require.NoError(err)

    _, err = store.Get("key")

    var unsetError *ValueUnsetError
    require.ErrorAs(err, &unsetError)
}

func testDelete(t *testing.T) {
    require := require.New(t)

    store, err := newStore()
    require.NoError(err)
    err = store.Put("key", []byte("value"))
    require.NoError(err)

    err = store.Delete("key")
    require.NoError(err)

    _, err = store.Get("key")
    var unsetError *ValueUnsetError
    require.ErrorAs(err, &unsetError)
}

// Deleting a non-existing key is fine and should not result in an error.
func testDeleteNonExisting(t *testing.T) {
    require := require.New(t)

    store, err := newStore()
    require.NoError(err)

    err = store.Delete("key")

    require.NoError(err)
}

func testIterator(t *testing.T) {
    require := require.New(t)
    assert := assert.New(t)

    store, err := newStore()
    require.NoError(err)
    require.NoError(store.Put("iterate:1", []byte("one")))
    require.NoError(store.Put("iterate:2", []byte("two")))
    require.NoError(store.Put("iterate:3", []byte("three")))

    iter, err := store.Iterator("iterate")
    require.NoError(err)
    idx := 0
    for iter.HasNext() {
        idx++
        val, err := iter.GetNext()
        assert.NoError(err)
        assert.Contains(val, "iterate:")
    }
    assert.Equal(3, idx)
}

func testIteratorSingleKey(t *testing.T) {
    assert := assert.New(t)
    require := require.New(t)

    store, err := newStore()
    require.NoError(err)
    require.NoError(store.Put("key", []byte("value")))

    iter, err := store.Iterator("key")
    require.NoError(err)
    key, err := iter.GetNext()
    require.NoError(err)
    assert.Equal("key", key)
    require.False(iter.HasNext())
}

func testIteratorNoValues(t *testing.T) {
    require := require.New(t)

    store, err := newStore()
    require.NoError(err)

    iter, err := store.Iterator("iterate")
    require.NoError(err)
    require.False(iter.HasNext())

    _, err = iter.GetNext()
    var noElementsLeftError *NoElementsLeftError
    require.ErrorAs(err, &noElementsLeftError)
}

// Test for race conditions in the StdStore.
// Run the tests with the -race flag to make this effective.
func testIteratorRace(t *testing.T) {
    require := require.New(t)

    store, err := newStore()
    require.NoError(err)
    tx, err := store.BeginTransaction()
    require.NoError(err)
    require.NoError(tx.Put("key", []byte("value")))
    go func() {
        _, err = store.Iterator("key")
        require.NoError(err)
    }()
    require.NoError(tx.Commit())
}

func testTransaction(t *testing.T) {
    require := require.New(t)
    assert := assert.New(t)

    store, err := newStore()
    require.NoError(err)
    require.NoError(store.Put("toBeDeleted", []byte{0x00, 0x00}))

    tx1, err := store.BeginTransaction()
    require.NoError(err)
    require.NoError(tx1.Put("newKey", []byte{0x11, 0x11}))
    require.NoError(tx1.Delete("toBeDeleted"))
    require.NoError(tx1.Commit())

    result, err := store.Get("newKey")
    require.NoError(err)
    assert.Equal(result, []byte{0x11, 0x11})
    _, err = store.Get("toBeDeleted")
    require.Error(err)
}

func testTransactionInternalChangesVisible(t *testing.T) {
    require := require.New(t)

    store, err := newStore()
    require.NoError(err)

    tx, err := store.BeginTransaction()
    require.NoError(err)
    require.NoError(tx.Put("key", []byte("value")))

    fetchedValue, err := tx.Get("key")
    require.NoError(err)
    require.Equal([]byte("value"), fetchedValue)
}

func testTransactionInternalChangesNotVisibleOutside(t *testing.T) {
    require := require.New(t)

    store, err := newStore()
    require.NoError(err)

    tx, err := store.BeginTransaction()
    require.NoError(err)
    require.NoError(tx.Put("key", []byte("value")))

    _, err = store.Get("key")
    var valueUnsetError *ValueUnsetError
    require.ErrorAs(err, &valueUnsetError)
}

func testTransactionNoop(t *testing.T) {
    require := require.New(t)

    store, err := newStore()
    require.NoError(err)

    tx, err := store.BeginTransaction()
    require.NoError(err)
    require.NotNil(tx)

    err = tx.Commit()
    require.NoError(err)
}

func testTransactionDeleteThenPut(t *testing.T) {
    assert := assert.New(t)
    require := require.New(t)

    store, err := newStore()
    require.NoError(err)
    require.NoError(store.Put("key", []byte{0x00, 0x00}))

    tx1, err := store.BeginTransaction()
    require.NoError(err)

    require.NoError(tx1.Put("key", []byte{0x00, 0x11}))
    assert.NoError(tx1.Delete("key"))
    require.NoError(tx1.Put("key", []byte{0x7, 0x8}))
    assert.NoError(tx1.Commit())

    result, err := store.Get("key")
    assert.NoError(err)
    assert.Equal([]byte{0x7, 0x8}, result)
}

func testTransactionDelete(t *testing.T) {
    assert := assert.New(t)
    require := require.New(t)

    store, err := newStore()
    require.NoError(err)
    require.NoError(store.Put("key", []byte{0x00, 0x00}))

    tx1, err := store.BeginTransaction()
    require.NoError(err)
    assert.NoError(tx1.Delete("key"))
    _, err = tx1.Get("key")
    assert.Error(err)
    assert.NoError(tx1.Commit())
}

func testTransactionIterator(t *testing.T) {
    assert := assert.New(t)
    require := require.New(t)

    store, err := newStore()
    require.NoError(err)
    require.NoError(store.Put("key", []byte("value")))

    tx, err := store.BeginTransaction()
    require.NoError(err)
    iter, err := tx.Iterator("key")
    require.NoError(err)
    key, err := iter.GetNext()
    require.NoError(err)
    assert.Equal("key", key)
    require.NoError(tx.Commit())
    require.False(iter.HasNext())
}

func testTransactionIterateNotSeeDeleted(t *testing.T) {
    assert := assert.New(t)
    require := require.New(t)

    store, err := newStore()
    require.NoError(err)
    require.NoError(store.Put("key:1", []byte("value")))
    require.NoError(store.Put("key:2", []byte("otherValue")))

    tx, err := store.BeginTransaction()
    require.NoError(err)
    require.NoError(tx.Delete("key:1"))
    iter, err := tx.Iterator("key")
    require.NoError(err)
    key, err := iter.GetNext()
    require.NoError(err)
    assert.Equal("key:2", key)
    require.NoError(tx.Commit())
    require.False(iter.HasNext())
}

func testTransactionGetAfterCommit(t *testing.T) {
    require := require.New(t)

    store, err := newStore()
    require.NoError(err)

    tx, err := store.BeginTransaction()
    require.NoError(err)
    require.NoError(tx.Put("key", []byte("value")))
    require.NoError(tx.Commit())

    _, err = tx.Get("key")
    var alreadyCommittedError *TransactionAlreadyCommittedError
    require.ErrorAs(err, &alreadyCommittedError)
}

func testTransactionPutAfterCommit(t *testing.T) {
    require := require.New(t)

    store, err := newStore()
    require.NoError(err)

    tx, err := store.BeginTransaction()
    require.NoError(err)
    require.NoError(tx.Put("key", []byte("value")))
    require.NoError(tx.Commit())

    err = tx.Put("key", []byte("newValue"))
    var alreadyCommittedError *TransactionAlreadyCommittedError
    require.ErrorAs(err, &alreadyCommittedError)
}

func testTransactionDeleteAfterCommit(t *testing.T) {
    require := require.New(t)

    store, err := newStore()
    require.NoError(err)

    tx, err := store.BeginTransaction()
    require.NoError(err)
    require.NoError(tx.Put("key", []byte("value")))
    require.NoError(tx.Commit())

    err = tx.Delete("key")
    var alreadyCommittedError *TransactionAlreadyCommittedError
    require.ErrorAs(err, &alreadyCommittedError)
}

func testRollbackPut(t *testing.T) {
    assert := assert.New(t)
    require := require.New(t)

    store, err := newStore()
    require.NoError(err)

    tx, err := store.BeginTransaction()
    require.NoError(err)
    err = tx.Put("key", []byte("value"))
    require.NoError(err)
    tx.Rollback()

    _, err = store.Get("key")
    var valueUnsetError *ValueUnsetError
    assert.ErrorAs(err, &valueUnsetError)
}

func testRollbackDelete(t *testing.T) {
    assert := assert.New(t)
    require := require.New(t)

    store, err := newStore()
    require.NoError(err)
    require.NoError(store.Put("key", []byte("value")))

    tx, err := store.BeginTransaction()
    require.NoError(err)
    err = tx.Delete("key")
    require.NoError(err)
    tx.Rollback()

    fetchedValue, err := store.Get("key")
    require.NoError(err)
    assert.Equal([]byte("value"), fetchedValue)
}

// Explicitly test the store's locking mechanism.
// This could fail for non-blocking transactions.
func testConcurrency(t *testing.T) {
    assert := assert.New(t)
    require := require.New(t)

    store, err := newStore()
    require.NoError(err)
    tx1, err := store.BeginTransaction()
    require.NoError(err)
    assert.NoError(tx1.Put("key", []byte("one")))

    go func() {
        time.Sleep(200 * time.Millisecond)
        require.NoError(tx1.Commit())
    }()
    tx2, err := store.BeginTransaction()
    require.NoError(err)
    result, err := tx2.Get("key")
    require.NoError(err)
    assert.Equal(result, []byte("one"))
    assert.NoError(tx2.Put("key", []byte("two")))
    assert.NoError(tx2.Commit())
    result, err = store.Get("key")
    require.NoError(err)
    assert.Equal(result, []byte("two"))
}

func testStoreByValue(t *testing.T) {
    assert := assert.New(t)
    require := require.New(t)

    store, err := newStore()
    require.NoError(err)
    testValue := []byte{0xF1, 0xFF}
    require.NoError(store.Put("StoreByValue", testValue))

    testValue[0] = 0x00
    storeValue, err := store.Get("StoreByValue")

    require.NoError(err)
    assert.NotEqual(storeValue[0], testValue[0])
}

func testIndependentTest(t *testing.T) {
    assert := assert.New(t)
    require := require.New(t)

    store, err := newStore()
    require.NoError(err)

    err = store.Put("uniqueTestKey253", []byte("value"))
    require.NoError(err)

    value, err := store.Get("uniqueTestKey253")
    require.NoError(err)
    assert.Equal([]byte("value"), value)
}

func testIndependentTestReader(t *testing.T) {
    assert := assert.New(t)
    require := require.New(t)

    store, err := newStore()
    require.NoError(err)

    // This test should not see & depend on the key `testIndependentTest` sets.
    _, err = store.Get("uniqueTestKey253")
    var unsetErr *ValueUnsetError
    assert.ErrorAs(err, &unsetErr)
}
@ -1,341 +0,0 @@
package storewrapper

import (
	"encoding/json"
	"errors"
	"fmt"
	"net/netip"
	"strconv"
	"strings"

	"github.com/edgelesssys/constellation/coordinator/peer"
	"github.com/edgelesssys/constellation/coordinator/state"
	"github.com/edgelesssys/constellation/coordinator/store"
	kms "github.com/edgelesssys/constellation/kms/setup"
	kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
)

// Variables used as a store prefix start with prefix[...].
// Variables used as a store key start with key[...].
const (
	keyHighestAvailableCoordinatorIP = "highestAvailableCoordinatorIP"
	keyHighestAvailableNodeIP        = "highestAvailableNodeIP"
	keyKubernetesJoinCommand         = "kubeJoin"
	keyPeersResourceVersion          = "peersResourceVersion"
	keyMasterSecret                  = "masterSecret"
	keyKubeConfig                    = "kubeConfig"
	keyClusterID                     = "clusterID"
	keyKMSData                       = "KMSData"
	keyKEKID                         = "kekID"
	prefixFreeCoordinatorIPs         = "freeCoordinatorVPNIPs"
	prefixPeerLocation               = "peerPrefix"
	prefixFreeNodeIPs                = "freeNodeVPNIPs"
)

var (
	coordinatorIPRangeStart = netip.AddrFrom4([4]byte{10, 118, 0, 1})
	coordinatorIPRangeEnd   = netip.AddrFrom4([4]byte{10, 118, 0, 10})
	nodeIPRangeStart        = netip.AddrFrom4([4]byte{10, 118, 0, 11})
	nodeIPRangeEnd          = netip.AddrFrom4([4]byte{10, 118, 255, 254})
)

// StoreWrapper is a wrapper for the store interface.
type StoreWrapper struct {
	Store interface {
		Get(string) ([]byte, error)
		Put(string, []byte) error
		Delete(string) error
		Iterator(string) (store.Iterator, error)
	}
}

// GetState returns the state from store.
func (s StoreWrapper) GetState() (state.State, error) {
	rawState, err := s.Store.Get("state")
	if err != nil {
		return 0, err
	}

	currState, err := strconv.Atoi(string(rawState))
	if err != nil {
		return 0, err
	}

	return state.State(currState), nil
}

// PutState saves the state to store.
func (s StoreWrapper) PutState(currState state.State) error {
	rawState := []byte(strconv.Itoa(int(currState)))
	return s.Store.Put("state", rawState)
}

// PutPeer puts a single peer in the store, with a unique key derived from the VPNIP.
func (s StoreWrapper) PutPeer(peer peer.Peer) error {
	if len(peer.VPNIP) == 0 {
		return errors.New("unique ID of peer not set")
	}
	jsonPeer, err := json.Marshal(peer)
	if err != nil {
		return err
	}
	return s.Store.Put(prefixPeerLocation+peer.VPNIP, jsonPeer)
}

// RemovePeer removes a peer from the store.
func (s StoreWrapper) RemovePeer(peer peer.Peer) error {
	return s.Store.Delete(prefixPeerLocation + peer.VPNIP)
}

// GetPeers returns all peers in the store.
func (s StoreWrapper) GetPeers() ([]peer.Peer, error) {
	return s.getPeersByPrefix(prefixPeerLocation)
}

// IncrementPeersResourceVersion increments the version of the stored peers.
// Should be called in a transaction together with Add/Remove operation(s).
func (s StoreWrapper) IncrementPeersResourceVersion() error {
	val, err := s.GetPeersResourceVersion()
	var unsetErr *store.ValueUnsetError
	if errors.As(err, &unsetErr) {
		val = 0
	} else if err != nil {
		return err
	}
	return s.Store.Put(keyPeersResourceVersion, []byte(strconv.Itoa(val+1)))
}

// GetPeersResourceVersion returns the current version of the stored peers.
func (s StoreWrapper) GetPeersResourceVersion() (int, error) {
	raw, err := s.Store.Get(keyPeersResourceVersion)
	if err != nil {
		return 0, err
	}
	val, err := strconv.Atoi(string(raw))
	if err != nil {
		return 0, err
	}
	return val, nil
}

func (s StoreWrapper) getPeersByPrefix(prefix string) ([]peer.Peer, error) {
	peerKeys, err := s.Store.Iterator(prefix)
	if err != nil {
		return nil, err
	}
	var peers []peer.Peer
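	// Every key under the prefix holds one JSON-marshaled peer; fetch and decode each.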
	for peerKeys.HasNext() {
		storeKey, err := peerKeys.GetNext()
		if err != nil {
			return nil, err
		}
		marshalPeer, err := s.Store.Get(storeKey)
		if err != nil {
			return nil, err
		}
		var peer peer.Peer
		if err := json.Unmarshal(marshalPeer, &peer); err != nil {
			return nil, err
		}
		peers = append(peers, peer)
	}
	return peers, nil
}

// GetKubernetesJoinArgs returns the Kubernetes join command from store.
func (s StoreWrapper) GetKubernetesJoinArgs() (*kubeadm.BootstrapTokenDiscovery, error) {
	rawJoinCommand, err := s.Store.Get(keyKubernetesJoinCommand)
	if err != nil {
		return nil, err
	}
	joinCommand := kubeadm.BootstrapTokenDiscovery{}
	if err := json.Unmarshal(rawJoinCommand, &joinCommand); err != nil {
		return nil, err
	}
	return &joinCommand, nil
}

// GetKubernetesConfig returns the Kubernetes kubeconfig file to authenticate with the Kubernetes API.
func (s StoreWrapper) GetKubernetesConfig() ([]byte, error) {
	return s.Store.Get(keyKubeConfig)
}

// PutKubernetesConfig saves the Kubernetes kubeconfig file to store.
func (s StoreWrapper) PutKubernetesConfig(kubeConfig []byte) error {
	return s.Store.Put(keyKubeConfig, kubeConfig)
}

// GetMasterSecret returns the Constellation master secret from store.
func (s StoreWrapper) GetMasterSecret() ([]byte, error) {
	return s.Store.Get(keyMasterSecret)
}

// PutMasterSecret saves the Constellation master secret to store.
func (s StoreWrapper) PutMasterSecret(masterSecret []byte) error {
	return s.Store.Put(keyMasterSecret, masterSecret)
}

// GetKEKID returns the key encryption key ID from store.
func (s StoreWrapper) GetKEKID() (string, error) {
	kekID, err := s.Store.Get(keyKEKID)
	return string(kekID), err
}

// PutKEKID saves the key encryption key ID to store.
func (s StoreWrapper) PutKEKID(kekID string) error {
	return s.Store.Put(keyKEKID, []byte(kekID))
}

// GetKMSData returns the KMSData from the store.
func (s StoreWrapper) GetKMSData() (kms.KMSInformation, error) {
	storeData, err := s.Store.Get(keyKMSData)
	if err != nil {
		return kms.KMSInformation{}, err
	}
	data := kms.KMSInformation{}
	if err := json.Unmarshal(storeData, &data); err != nil {
		return kms.KMSInformation{}, err
	}
	return data, nil
}

// PutKMSData puts the KMSData in the store.
func (s StoreWrapper) PutKMSData(kmsInfo kms.KMSInformation) error {
	byteKMSInfo, err := json.Marshal(kmsInfo)
	if err != nil {
		return err
	}
	return s.Store.Put(keyKMSData, byteKMSInfo)
}

// GetClusterID returns the unique identifier of the cluster from store.
func (s StoreWrapper) GetClusterID() ([]byte, error) {
	return s.Store.Get(keyClusterID)
}

// PutClusterID saves the unique identifier of the cluster to store.
func (s StoreWrapper) PutClusterID(clusterID []byte) error {
	return s.Store.Put(keyClusterID, clusterID)
}

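// InitializeStoreIPs seeds the store with the first assignable node and coordinator VPN IPs.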
func (s StoreWrapper) InitializeStoreIPs() error {
	if err := s.PutNextNodeIP(nodeIPRangeStart); err != nil {
		return err
	}
	return s.PutNextCoordinatorIP(coordinatorIPRangeStart)
}

// PutNextCoordinatorIP stores the next coordinator IP to be assigned.
func (s StoreWrapper) PutNextCoordinatorIP(ip netip.Addr) error {
	return s.Store.Put(keyHighestAvailableCoordinatorIP, ip.AsSlice())
}

// getNextCoordinatorIP hands out the next unassigned address from the coordinator IP range.
func (s StoreWrapper) getNextCoordinatorIP() (netip.Addr, error) {
	byteIP, err := s.Store.Get(keyHighestAvailableCoordinatorIP)
	if err != nil {
		return netip.Addr{}, errors.New("could not obtain IP from store")
	}
	ip, ok := netip.AddrFromSlice(byteIP)
	if !ok {
		return netip.Addr{}, fmt.Errorf("ip addr malformed %v", byteIP)
	}
	if !ip.IsValid() || ip.Compare(coordinatorIPRangeEnd) == 1 {
		return netip.Addr{}, errors.New("no ips left to assign")
	}
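	// Hand out the current address and advance the stored pointer to the next one.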
	nextIP := ip.Next()
	if err := s.PutNextCoordinatorIP(nextIP); err != nil {
		return netip.Addr{}, errors.New("could not put IP to store")
	}
	return ip, nil
}

// PopNextFreeCoordinatorIP returns the next free coordinator IP; this is either an
// old IP freed by a removed peer or a newly generated one.
func (s StoreWrapper) PopNextFreeCoordinatorIP() (netip.Addr, error) {
	vpnIP, err := s.getFreedVPNIP(prefixFreeCoordinatorIPs)
	var noElementsError *store.NoElementsLeftError
	if errors.As(err, &noElementsError) {
		return s.getNextCoordinatorIP()
	}
	if err != nil {
		return netip.Addr{}, err
	}
	return vpnIP, nil
}

// PutFreedCoordinatorVPNIP puts an already generated but currently unused VPN IP
// (IP < highestAvailableCoordinatorIP) into the store.
// The IP is saved under a dedicated prefix and is reused with priority when a new
// coordinator IP is requested.
func (s StoreWrapper) PutFreedCoordinatorVPNIP(vpnIP string) error {
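	// Only the key encodes the freed IP; no value needs to be stored.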
	return s.Store.Put(prefixFreeCoordinatorIPs+vpnIP, nil)
}

// PutNextNodeIP stores the next node IP to be assigned.
func (s StoreWrapper) PutNextNodeIP(ip netip.Addr) error {
	return s.Store.Put(keyHighestAvailableNodeIP, ip.AsSlice())
}

// getNextNodeIP hands out the next unassigned address from the node IP range of the /16 subnet.
func (s StoreWrapper) getNextNodeIP() (netip.Addr, error) {
	byteIP, err := s.Store.Get(keyHighestAvailableNodeIP)
	if err != nil {
		return netip.Addr{}, errors.New("could not obtain IP from store")
	}
	ip, ok := netip.AddrFromSlice(byteIP)
	if !ok {
		return netip.Addr{}, fmt.Errorf("ip addr malformed %v", byteIP)
	}
	if !ip.IsValid() || ip.Compare(nodeIPRangeEnd) == 1 {
		return netip.Addr{}, errors.New("no ips left to assign")
	}
	nextIP := ip.Next()

	if err := s.PutNextNodeIP(nextIP); err != nil {
		return netip.Addr{}, errors.New("could not put IP to store")
	}
	return ip, nil
}

// PopNextFreeNodeIP returns the next free node IP; this is either an old IP freed
// by a removed peer or a newly generated one.
func (s StoreWrapper) PopNextFreeNodeIP() (netip.Addr, error) {
	vpnIP, err := s.getFreedVPNIP(prefixFreeNodeIPs)
	var noElementsError *store.NoElementsLeftError
	if errors.As(err, &noElementsError) {
		return s.getNextNodeIP()
	}
	if err != nil {
		return netip.Addr{}, err
	}
	return vpnIP, nil
}

// PutFreedNodeVPNIP puts an already generated but currently unused VPN IP
// (IP < highestAvailableNodeIP) into the store.
// The IP is saved under a dedicated prefix and is reused with priority when a new
// node IP is requested.
func (s StoreWrapper) PutFreedNodeVPNIP(vpnIP string) error {
	return s.Store.Put(prefixFreeNodeIPs+vpnIP, nil)
}

// getFreedVPNIP reclaims a VPN IP from the store and removes it from there.
func (s StoreWrapper) getFreedVPNIP(prefix string) (netip.Addr, error) {
	iter, err := s.Store.Iterator(prefix)
	if err != nil {
		return netip.Addr{}, err
	}
	vpnIPWithPrefix, err := iter.GetNext()
	if err != nil {
		return netip.Addr{}, err
	}
	stringVPNIP := strings.TrimPrefix(vpnIPWithPrefix, prefix)
	vpnIP, err := netip.ParseAddr(stringVPNIP)
	if err != nil {
		return netip.Addr{}, fmt.Errorf("ip addr malformed %v, %w", stringVPNIP, err)
	}

	return vpnIP, s.Store.Delete(vpnIPWithPrefix)
}
@ -1,327 +0,0 @@
package storewrapper

import (
	"net/netip"
	"testing"

	"github.com/edgelesssys/constellation/coordinator/peer"
	"github.com/edgelesssys/constellation/coordinator/state"
	"github.com/edgelesssys/constellation/coordinator/store"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"go.uber.org/goleak"
	"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
)

func TestMain(m *testing.M) {
	goleak.VerifyTestMain(m,
		// https://github.com/census-instrumentation/opencensus-go/issues/1262
		goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"),
	)
}

func TestStoreWrapper(t *testing.T) {
	assert := assert.New(t)

	curState := state.IsNode

	masterSecret := []byte("Constellation")

	stor := store.NewStdStore()
	stwrapper := StoreWrapper{Store: stor}
	assert.NoError(stwrapper.PutState(state.AcceptingInit))
	assert.NoError(stwrapper.PutMasterSecret(masterSecret))

	// save values to store
	tx, err := stor.BeginTransaction()
	assert.NoError(err)
	txdata := StoreWrapper{tx}
	assert.NoError(txdata.PutState(curState))
	assert.NoError(tx.Commit())

	// see if we can retrieve them again
	savedState, err := stwrapper.GetState()
	assert.NoError(err)
	assert.Equal(curState, savedState)
	savedSecret, err := stwrapper.GetMasterSecret()
	assert.NoError(err)
	assert.Equal(masterSecret, savedSecret)
}

func TestStoreWrapperDefaults(t *testing.T) {
	assert := assert.New(t)

	stor := store.NewStdStore()
	stwrapper := StoreWrapper{Store: stor}
	assert.NoError(stwrapper.PutState(state.AcceptingInit))

	statevalue, err := stwrapper.GetState()
	assert.NoError(err)
	assert.Equal(state.AcceptingInit, statevalue)
}

func TestStoreWrapperRollback(t *testing.T) {
	assert := assert.New(t)

	stor := store.NewStdStore()
	stwrapper := StoreWrapper{Store: stor}
	assert.NoError(stwrapper.PutState(state.AcceptingInit))

	assert.NoError(stwrapper.PutClusterID([]byte{1, 2, 3}))

	c1 := []byte{2, 3, 4}
	c2 := []byte{3, 4, 5}

	tx, err := stor.BeginTransaction()
	assert.NoError(err)
	assert.NoError(StoreWrapper{tx}.PutClusterID(c1))
	assert.NoError(tx.Commit())

	tx, err = stor.BeginTransaction()
	assert.NoError(err)
	assert.NoError(StoreWrapper{tx}.PutClusterID(c2))
	tx.Rollback()

	val, err := stwrapper.GetClusterID()
	assert.NoError(err)
	assert.Equal(c1, val)
}

func TestStoreWrapperPeerInterface(t *testing.T) {
	assert := assert.New(t)
	require := require.New(t)

	stor := store.NewStdStore()
	stwrapper := StoreWrapper{Store: stor}
	assert.NoError(stwrapper.PutState(state.AcceptingInit))

	key, err := wgtypes.GeneratePrivateKey()
	assert.NoError(err)

	ip := "192.0.2.1"
	internalIP := "10.118.2.0"

	validPeer := peer.Peer{
		PublicIP:  ip,
		VPNPubKey: key[:],
		VPNIP:     internalIP,
	}
	require.NoError(stwrapper.PutPeer(validPeer))
	data, err := stwrapper.GetPeers()
	require.NoError(err)
	require.Equal(1, len(data))
	assert.Equal(ip, data[0].PublicIP)
	assert.Equal(key[:], data[0].VPNPubKey)
	assert.Equal(internalIP, data[0].VPNIP)

	invalidPeer := peer.Peer{
		PublicIP:  ip,
		VPNPubKey: key[:],
		VPNIP:     "",
	}
	assert.Error(stwrapper.PutPeer(invalidPeer))
}

func TestGenerateNextNodeIP(t *testing.T) {
	assert := assert.New(t)
	require := require.New(t)

	stor := store.NewStdStore()
	stwrapper := StoreWrapper{Store: stor}
	require.NoError(stwrapper.PutNextNodeIP(netip.AddrFrom4([4]byte{10, 118, 0, 11})))

	ip, err := stwrapper.getNextNodeIP()
	assert.NoError(err)
	assert.Equal(netip.AddrFrom4([4]byte{10, 118, 0, 11}), ip)

	ip, err = stwrapper.getNextNodeIP()
	assert.NoError(err)
	assert.Equal(netip.AddrFrom4([4]byte{10, 118, 0, 12}), ip)

	ip, err = stwrapper.getNextNodeIP()
	assert.NoError(err)
	assert.Equal(netip.AddrFrom4([4]byte{10, 118, 0, 13}), ip)

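	// Drain the remaining range: the /16 holds 256*256 addresses; subtract the 11
	// addresses below the node range (10.118.0.0-10.118.0.10), the broadcast
	// address, the three IPs taken above, and the two checked below (= 17).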
	for i := 0; i < 256*256-17; i++ {
		ip, err = stwrapper.getNextNodeIP()
		assert.NoError(err)
		assert.NotEmpty(ip)
	}

	ip, err = stwrapper.getNextNodeIP()
	assert.NoError(err)
	assert.Equal(netip.AddrFrom4([4]byte{10, 118, 255, 253}), ip)

	ip, err = stwrapper.getNextNodeIP()
	assert.NoError(err)
	assert.Equal(netip.AddrFrom4([4]byte{10, 118, 255, 254}), ip)

	// 10.118.255.255 (broadcast IP) should not be returned
	ip, err = stwrapper.getNextNodeIP()
	assert.Error(err)
	assert.Empty(ip)

	// error should still persist
	ip, err = stwrapper.getNextNodeIP()
	assert.Error(err)
	assert.Empty(ip)
}

func TestPopNextFreeNodeIP(t *testing.T) {
	assert := assert.New(t)
	require := require.New(t)

	stor := store.NewStdStore()
	stwrapper := StoreWrapper{Store: stor}
	require.NoError(stwrapper.PutNextNodeIP(netip.AddrFrom4([4]byte{10, 118, 0, 11})))

	ip, err := stwrapper.PopNextFreeNodeIP()
	assert.NoError(err)
	assert.Equal(netip.AddrFrom4([4]byte{10, 118, 0, 11}), ip)

	ip, err = stwrapper.PopNextFreeNodeIP()
	assert.NoError(err)
	assert.Equal(netip.AddrFrom4([4]byte{10, 118, 0, 12}), ip)

	ip, err = stwrapper.PopNextFreeNodeIP()
	assert.NoError(err)
	assert.Equal(netip.AddrFrom4([4]byte{10, 118, 0, 13}), ip)

	require.NoError(stwrapper.PutFreedNodeVPNIP("10.118.0.13"))
	require.NoError(stwrapper.PutFreedNodeVPNIP("10.118.0.12"))
	ipsInStore := map[netip.Addr]struct{}{
		netip.AddrFrom4([4]byte{10, 118, 0, 12}): {},
		netip.AddrFrom4([4]byte{10, 118, 0, 13}): {},
	}

	ip, err = stwrapper.PopNextFreeNodeIP()
	assert.NoError(err)
	assert.Contains(ipsInStore, ip)
	delete(ipsInStore, ip)

	ip, err = stwrapper.PopNextFreeNodeIP()
	assert.NoError(err)
	assert.Contains(ipsInStore, ip)
	delete(ipsInStore, ip)

	ip, err = stwrapper.PopNextFreeNodeIP()
	assert.NoError(err)
	assert.Equal(netip.AddrFrom4([4]byte{10, 118, 0, 14}), ip)
}

func TestGenerateNextCoordinatorIP(t *testing.T) {
	assert := assert.New(t)
	require := require.New(t)

	stor := store.NewStdStore()
	stwrapper := StoreWrapper{Store: stor}
	require.NoError(stwrapper.PutNextCoordinatorIP(netip.AddrFrom4([4]byte{10, 118, 0, 1})))

	ip, err := stwrapper.getNextCoordinatorIP()
	assert.NoError(err)
	assert.Equal(netip.AddrFrom4([4]byte{10, 118, 0, 1}), ip)

	ip, err = stwrapper.getNextCoordinatorIP()
	assert.NoError(err)
	assert.Equal(netip.AddrFrom4([4]byte{10, 118, 0, 2}), ip)

	ip, err = stwrapper.getNextCoordinatorIP()
	assert.NoError(err)
	assert.Equal(netip.AddrFrom4([4]byte{10, 118, 0, 3}), ip)

	for i := 0; i < 7; i++ {
		ip, err = stwrapper.getNextCoordinatorIP()
		assert.NoError(err)
		assert.NotEmpty(ip)
	}

	// 10.118.0.11 (first Node IP) should not be returned
	ip, err = stwrapper.getNextCoordinatorIP()
	assert.Error(err)
	assert.Empty(ip)

	// error should still persist
	ip, err = stwrapper.getNextCoordinatorIP()
	assert.Error(err)
	assert.Empty(ip)
}

func TestPopNextFreeCoordinatorIP(t *testing.T) {
	assert := assert.New(t)
	require := require.New(t)

	stor := store.NewStdStore()
	stwrapper := StoreWrapper{Store: stor}
	require.NoError(stwrapper.PutNextCoordinatorIP(netip.AddrFrom4([4]byte{10, 118, 0, 1})))

	ip, err := stwrapper.PopNextFreeCoordinatorIP()
	assert.NoError(err)
	assert.Equal(netip.AddrFrom4([4]byte{10, 118, 0, 1}), ip)

	ip, err = stwrapper.PopNextFreeCoordinatorIP()
	assert.NoError(err)
	assert.Equal(netip.AddrFrom4([4]byte{10, 118, 0, 2}), ip)

	ip, err = stwrapper.PopNextFreeCoordinatorIP()
	assert.NoError(err)
	assert.Equal(netip.AddrFrom4([4]byte{10, 118, 0, 3}), ip)

	for i := 0; i < 7; i++ {
		_, err = stwrapper.PopNextFreeCoordinatorIP()
		require.NoError(err)
	}

	ip, err = stwrapper.PopNextFreeCoordinatorIP()
	assert.Error(err)
	assert.Empty(ip)

	require.NoError(stwrapper.PutFreedCoordinatorVPNIP("10.118.0.3"))
	require.NoError(stwrapper.PutFreedCoordinatorVPNIP("10.118.0.2"))
	ipsInStore := map[netip.Addr]struct{}{
		netip.AddrFrom4([4]byte{10, 118, 0, 3}): {},
		netip.AddrFrom4([4]byte{10, 118, 0, 2}): {},
	}

	ip, err = stwrapper.PopNextFreeCoordinatorIP()
	assert.NoError(err)
	assert.Contains(ipsInStore, ip)
	delete(ipsInStore, ip)

	ip, err = stwrapper.PopNextFreeCoordinatorIP()
	assert.NoError(err)
	assert.Contains(ipsInStore, ip)
	delete(ipsInStore, ip)

	ip, err = stwrapper.PopNextFreeCoordinatorIP()
	assert.Error(err)
	assert.Equal(netip.Addr{}, ip)
}

func TestGetFreedVPNIP(t *testing.T) {
	assert := assert.New(t)
	require := require.New(t)

	stor := store.NewStdStore()
	stwrapper := StoreWrapper{Store: stor}
	require.NoError(stwrapper.PutFreedCoordinatorVPNIP("203.0.113.1"))
	require.NoError(stwrapper.PutFreedCoordinatorVPNIP("203.0.113.2"))
	ipsInStore := map[netip.Addr]struct{}{
		netip.AddrFrom4([4]byte{203, 0, 113, 1}): {},
		netip.AddrFrom4([4]byte{203, 0, 113, 2}): {},
	}

	ip, err := stwrapper.getFreedVPNIP(prefixFreeCoordinatorIPs)
	require.NoError(err)
	assert.Contains(ipsInStore, ip)
	delete(ipsInStore, ip)

	ip, err = stwrapper.getFreedVPNIP(prefixFreeCoordinatorIPs)
	require.NoError(err)
	assert.Contains(ipsInStore, ip)
	delete(ipsInStore, ip)

	ip, err = stwrapper.getFreedVPNIP(prefixFreeCoordinatorIPs)
	var noElementsError *store.NoElementsLeftError
	assert.ErrorAs(err, &noElementsError)
	assert.Equal(netip.Addr{}, ip)
}
@ -1,85 +0,0 @@
// Package vpnapi implements the API that a coordinator exposes inside the VPN.
package vpnapi

import (
	"context"
	"net"

	"github.com/edgelesssys/constellation/coordinator/peer"
	"github.com/edgelesssys/constellation/coordinator/vpnapi/vpnproto"
	"go.uber.org/zap"
	"google.golang.org/grpc/codes"
	gpeer "google.golang.org/grpc/peer"
	"google.golang.org/grpc/status"
	kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
)

// API implements the VPN-facing API of the coordinator.
type API struct {
	logger *zap.Logger
	core   Core
	vpnproto.UnimplementedAPIServer
}

// New creates a new API.
func New(logger *zap.Logger, core Core) *API {
	return &API{
		logger: logger,
		core:   core,
	}
}

// GetUpdate returns updated information to a node. It also recognizes the call as a node's heartbeat.
func (a *API) GetUpdate(ctx context.Context, in *vpnproto.GetUpdateRequest) (*vpnproto.GetUpdateResponse, error) {
	if client, ok := gpeer.FromContext(ctx); ok {
		a.core.NotifyNodeHeartbeat(client.Addr)
	} else {
		a.logger.DPanic("Failed to get peer info from context.")
	}

	resourceVersion, peers, err := a.core.GetPeers(int(in.ResourceVersion))
	if err != nil {
		return nil, status.Errorf(codes.Internal, "%v", err)
	}

	return &vpnproto.GetUpdateResponse{ResourceVersion: int64(resourceVersion), Peers: peer.ToVPNProto(peers)}, nil
}

// GetK8SJoinArgs is the RPC call to get the K8s join args.
func (a *API) GetK8SJoinArgs(ctx context.Context, in *vpnproto.GetK8SJoinArgsRequest) (*vpnproto.GetK8SJoinArgsResponse, error) {
	args, err := a.core.GetK8sJoinArgs(context.TODO())
	if err != nil {
		return nil, status.Errorf(codes.Internal, "%v", err)
	}
	return &vpnproto.GetK8SJoinArgsResponse{
		ApiServerEndpoint:        args.APIServerEndpoint,
		Token:                    args.Token,
		DiscoveryTokenCaCertHash: args.CACertHashes[0],
	}, nil
}

// GetK8SCertificateKey is the RPC call to get the K8s certificateKey necessary for control-plane join.
func (a *API) GetK8SCertificateKey(ctx context.Context, in *vpnproto.GetK8SCertificateKeyRequest) (*vpnproto.GetK8SCertificateKeyResponse, error) {
	certKey, err := a.core.GetK8SCertificateKey(context.TODO())
	if err != nil {
		return nil, status.Errorf(codes.Internal, "%v", err)
	}
	return &vpnproto.GetK8SCertificateKeyResponse{CertificateKey: certKey}, nil
}

// GetDataKey returns a data key derived from the Constellation's master secret.
func (a *API) GetDataKey(ctx context.Context, in *vpnproto.GetDataKeyRequest) (*vpnproto.GetDataKeyResponse, error) {
	key, err := a.core.GetDataKey(ctx, in.DataKeyId, int(in.Length))
	if err != nil {
		return nil, status.Errorf(codes.Internal, "%v", err)
	}
	return &vpnproto.GetDataKeyResponse{DataKey: key}, nil
}

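// Core abstracts the coordinator core functionality that the API depends on.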
type Core interface {
	GetPeers(resourceVersion int) (int, []peer.Peer, error)
	NotifyNodeHeartbeat(net.Addr)
	GetK8sJoinArgs(ctx context.Context) (*kubeadm.BootstrapTokenDiscovery, error)
	GetK8SCertificateKey(ctx context.Context) (string, error)
	GetDataKey(ctx context.Context, dataKeyID string, length int) ([]byte, error)
}
@ -1,218 +0,0 @@
package vpnapi

import (
	"context"
	"errors"
	"net"
	"testing"

	"github.com/edgelesssys/constellation/coordinator/peer"
	"github.com/edgelesssys/constellation/coordinator/vpnapi/vpnproto"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"go.uber.org/goleak"
	"go.uber.org/zap/zaptest"
	gpeer "google.golang.org/grpc/peer"
	kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
)

func TestMain(m *testing.M) {
	goleak.VerifyTestMain(m)
}

func TestGetUpdate(t *testing.T) {
	someErr := errors.New("failed")
	clientIP := &net.IPAddr{IP: net.ParseIP("192.0.2.1")}
	peer1 := peer.Peer{PublicIP: "192.0.2.11", VPNIP: "192.0.2.21", VPNPubKey: []byte{1, 2, 3}}
	peer2 := peer.Peer{PublicIP: "192.0.2.12", VPNIP: "192.0.2.22", VPNPubKey: []byte{2, 3, 4}}
	peer3 := peer.Peer{PublicIP: "192.0.2.13", VPNIP: "192.0.2.23", VPNPubKey: []byte{3, 4, 5}}

	testCases := map[string]struct {
		clientAddr  net.Addr
		peers       []peer.Peer
		getPeersErr error
		wantErr     bool
	}{
		"0 peers": {
			clientAddr: clientIP,
			peers:      []peer.Peer{},
		},
		"1 peer": {
			clientAddr: clientIP,
			peers:      []peer.Peer{peer1},
		},
		"2 peers": {
			clientAddr: clientIP,
			peers:      []peer.Peer{peer1, peer2},
		},
		"3 peers": {
			clientAddr: clientIP,
			peers:      []peer.Peer{peer1, peer2, peer3},
		},
		"nil peers": {
			clientAddr: clientIP,
			peers:      nil,
		},
		"getPeers error": {
			clientAddr:  clientIP,
			getPeersErr: someErr,
			wantErr:     true,
		},
		"missing client addr": {
			peers: []peer.Peer{peer1},
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)
			require := require.New(t)

			const serverResourceVersion = 2
			const clientResourceVersion = 3

			core := &stubCore{peers: tc.peers, serverResourceVersion: serverResourceVersion, getPeersErr: tc.getPeersErr}
			api := New(zaptest.NewLogger(t), core)

			ctx := context.Background()
			if tc.clientAddr != nil {
				ctx = gpeer.NewContext(ctx, &gpeer.Peer{Addr: tc.clientAddr})
			}

			resp, err := api.GetUpdate(ctx, &vpnproto.GetUpdateRequest{ResourceVersion: clientResourceVersion})
			if tc.wantErr {
				assert.Error(err)
				return
			}
			require.NoError(err)

			assert.EqualValues(serverResourceVersion, resp.ResourceVersion)
			assert.Equal([]int{clientResourceVersion}, core.clientResourceVersions)

			require.Len(resp.Peers, len(tc.peers))
			for i, actual := range resp.Peers {
				want := tc.peers[i]
				assert.EqualValues(want.PublicIP, actual.PublicIp)
				assert.EqualValues(want.VPNIP, actual.VpnIp)
				assert.Equal(want.VPNPubKey, actual.VpnPubKey)
			}

			if tc.clientAddr == nil {
				assert.Empty(core.heartbeats)
			} else {
				assert.Equal([]net.Addr{tc.clientAddr}, core.heartbeats)
			}
		})
	}
}

func TestGetK8SJoinArgs(t *testing.T) {
	assert := assert.New(t)
	require := require.New(t)

	joinArgs := kubeadm.BootstrapTokenDiscovery{
		APIServerEndpoint: "endp",
		Token:             "token",
		CACertHashes:      []string{"dis"},
	}
	api := New(zaptest.NewLogger(t), &stubCore{joinArgs: joinArgs})

	resp, err := api.GetK8SJoinArgs(context.Background(), &vpnproto.GetK8SJoinArgsRequest{})
	require.NoError(err)
	assert.Equal(joinArgs.APIServerEndpoint, resp.ApiServerEndpoint)
	assert.Equal(joinArgs.Token, resp.Token)
	assert.Equal(joinArgs.CACertHashes[0], resp.DiscoveryTokenCaCertHash)
}

func TestGetDataKey(t *testing.T) {
	assert := assert.New(t)
	require := require.New(t)

	core := &stubCore{derivedKey: []byte{0x0, 0x1, 0x2, 0x3, 0x4, 0x5}}
	api := New(zaptest.NewLogger(t), core)
	res, err := api.GetDataKey(context.Background(), &vpnproto.GetDataKeyRequest{DataKeyId: "key-1", Length: 32})
	require.NoError(err)
	assert.Equal(core.derivedKey, res.DataKey)

	api = New(zaptest.NewLogger(t), &stubCore{deriveKeyErr: errors.New("error")})
	res, err = api.GetDataKey(context.Background(), &vpnproto.GetDataKeyRequest{DataKeyId: "key-1", Length: 32})
	assert.Error(err)
	assert.Nil(res)
}

func TestGetK8SCertificateKey(t *testing.T) {
	someErr := errors.New("someErr")
	certKey := "kubeadmKey"

	testCases := map[string]struct {
		certKey       string
		getCertKeyErr error
		wantErr       bool
	}{
		"basic": {
			certKey: certKey,
		},
		"error": {
			getCertKeyErr: someErr,
			wantErr:       true,
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)
			require := require.New(t)

			core := &stubCore{
				kubeadmCertificateKey: certKey,
				getCertKeyErr:         tc.getCertKeyErr,
			}

			api := New(zaptest.NewLogger(t), core)
			resp, err := api.GetK8SCertificateKey(context.Background(), &vpnproto.GetK8SCertificateKeyRequest{})

			if tc.wantErr {
				assert.Error(err)
				return
			}
			require.NoError(err)
			assert.Equal(certKey, resp.CertificateKey)
		})
	}
}

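// stubCore implements the Core interface for testing and records the calls it receives.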
type stubCore struct {
	peers                  []peer.Peer
	serverResourceVersion  int
	getPeersErr            error
	clientResourceVersions []int
	heartbeats             []net.Addr
	joinArgs               kubeadm.BootstrapTokenDiscovery
	kubeadmCertificateKey  string
	getCertKeyErr          error
	derivedKey             []byte
	deriveKeyErr           error
}

func (c *stubCore) GetPeers(resourceVersion int) (int, []peer.Peer, error) {
	c.clientResourceVersions = append(c.clientResourceVersions, resourceVersion)
	return c.serverResourceVersion, c.peers, c.getPeersErr
}

func (c *stubCore) NotifyNodeHeartbeat(addr net.Addr) {
	c.heartbeats = append(c.heartbeats, addr)
}

func (c *stubCore) GetK8sJoinArgs(context.Context) (*kubeadm.BootstrapTokenDiscovery, error) {
	return &c.joinArgs, nil
}

func (c *stubCore) GetK8SCertificateKey(context.Context) (string, error) {
	return c.kubeadmCertificateKey, c.getCertKeyErr
}

func (c *stubCore) GetDataKey(ctx context.Context, dataKeyID string, length int) ([]byte, error) {
	if c.deriveKeyErr != nil {
		return nil, c.deriveKeyErr
	}
	return c.derivedKey, nil
}
@ -1,733 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.28.0
// protoc v3.20.1
// source: vpnapi.proto

package vpnproto

import (
	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
	reflect "reflect"
	sync "sync"
)

const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

type GetUpdateRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	ResourceVersion int64 `protobuf:"varint,1,opt,name=resource_version,json=resourceVersion,proto3" json:"resource_version,omitempty"`
}

func (x *GetUpdateRequest) Reset() {
	*x = GetUpdateRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_vpnapi_proto_msgTypes[0]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *GetUpdateRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*GetUpdateRequest) ProtoMessage() {}

func (x *GetUpdateRequest) ProtoReflect() protoreflect.Message {
	mi := &file_vpnapi_proto_msgTypes[0]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GetUpdateRequest.ProtoReflect.Descriptor instead.
func (*GetUpdateRequest) Descriptor() ([]byte, []int) {
	return file_vpnapi_proto_rawDescGZIP(), []int{0}
}

func (x *GetUpdateRequest) GetResourceVersion() int64 {
	if x != nil {
		return x.ResourceVersion
	}
	return 0
}

type GetUpdateResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	ResourceVersion int64   `protobuf:"varint,1,opt,name=resource_version,json=resourceVersion,proto3" json:"resource_version,omitempty"`
	Peers           []*Peer `protobuf:"bytes,2,rep,name=peers,proto3" json:"peers,omitempty"`
}

func (x *GetUpdateResponse) Reset() {
	*x = GetUpdateResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_vpnapi_proto_msgTypes[1]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *GetUpdateResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*GetUpdateResponse) ProtoMessage() {}

func (x *GetUpdateResponse) ProtoReflect() protoreflect.Message {
	mi := &file_vpnapi_proto_msgTypes[1]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GetUpdateResponse.ProtoReflect.Descriptor instead.
func (*GetUpdateResponse) Descriptor() ([]byte, []int) {
	return file_vpnapi_proto_rawDescGZIP(), []int{1}
}

func (x *GetUpdateResponse) GetResourceVersion() int64 {
	if x != nil {
		return x.ResourceVersion
	}
	return 0
}

func (x *GetUpdateResponse) GetPeers() []*Peer {
	if x != nil {
		return x.Peers
	}
	return nil
}

type GetK8SJoinArgsRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
}

func (x *GetK8SJoinArgsRequest) Reset() {
	*x = GetK8SJoinArgsRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_vpnapi_proto_msgTypes[2]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *GetK8SJoinArgsRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*GetK8SJoinArgsRequest) ProtoMessage() {}

func (x *GetK8SJoinArgsRequest) ProtoReflect() protoreflect.Message {
	mi := &file_vpnapi_proto_msgTypes[2]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GetK8SJoinArgsRequest.ProtoReflect.Descriptor instead.
func (*GetK8SJoinArgsRequest) Descriptor() ([]byte, []int) {
	return file_vpnapi_proto_rawDescGZIP(), []int{2}
}

type GetK8SJoinArgsResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	ApiServerEndpoint        string `protobuf:"bytes,1,opt,name=api_server_endpoint,json=apiServerEndpoint,proto3" json:"api_server_endpoint,omitempty"`
	Token                    string `protobuf:"bytes,2,opt,name=token,proto3" json:"token,omitempty"`
	DiscoveryTokenCaCertHash string `protobuf:"bytes,3,opt,name=discovery_token_ca_cert_hash,json=discoveryTokenCaCertHash,proto3" json:"discovery_token_ca_cert_hash,omitempty"`
}

func (x *GetK8SJoinArgsResponse) Reset() {
	*x = GetK8SJoinArgsResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_vpnapi_proto_msgTypes[3]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *GetK8SJoinArgsResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*GetK8SJoinArgsResponse) ProtoMessage() {}

func (x *GetK8SJoinArgsResponse) ProtoReflect() protoreflect.Message {
	mi := &file_vpnapi_proto_msgTypes[3]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GetK8SJoinArgsResponse.ProtoReflect.Descriptor instead.
func (*GetK8SJoinArgsResponse) Descriptor() ([]byte, []int) {
	return file_vpnapi_proto_rawDescGZIP(), []int{3}
}

func (x *GetK8SJoinArgsResponse) GetApiServerEndpoint() string {
	if x != nil {
		return x.ApiServerEndpoint
	}
	return ""
}

func (x *GetK8SJoinArgsResponse) GetToken() string {
	if x != nil {
		return x.Token
	}
	return ""
}

func (x *GetK8SJoinArgsResponse) GetDiscoveryTokenCaCertHash() string {
	if x != nil {
		return x.DiscoveryTokenCaCertHash
	}
	return ""
}

type GetK8SCertificateKeyRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
}

func (x *GetK8SCertificateKeyRequest) Reset() {
	*x = GetK8SCertificateKeyRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_vpnapi_proto_msgTypes[4]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *GetK8SCertificateKeyRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*GetK8SCertificateKeyRequest) ProtoMessage() {}

func (x *GetK8SCertificateKeyRequest) ProtoReflect() protoreflect.Message {
	mi := &file_vpnapi_proto_msgTypes[4]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GetK8SCertificateKeyRequest.ProtoReflect.Descriptor instead.
func (*GetK8SCertificateKeyRequest) Descriptor() ([]byte, []int) {
	return file_vpnapi_proto_rawDescGZIP(), []int{4}
}

type GetK8SCertificateKeyResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	CertificateKey string `protobuf:"bytes,1,opt,name=certificateKey,proto3" json:"certificateKey,omitempty"`
}

func (x *GetK8SCertificateKeyResponse) Reset() {
	*x = GetK8SCertificateKeyResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_vpnapi_proto_msgTypes[5]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *GetK8SCertificateKeyResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*GetK8SCertificateKeyResponse) ProtoMessage() {}

func (x *GetK8SCertificateKeyResponse) ProtoReflect() protoreflect.Message {
	mi := &file_vpnapi_proto_msgTypes[5]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GetK8SCertificateKeyResponse.ProtoReflect.Descriptor instead.
func (*GetK8SCertificateKeyResponse) Descriptor() ([]byte, []int) {
	return file_vpnapi_proto_rawDescGZIP(), []int{5}
}

func (x *GetK8SCertificateKeyResponse) GetCertificateKey() string {
	if x != nil {
		return x.CertificateKey
	}
	return ""
}

type Peer struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	PublicIp  string `protobuf:"bytes,1,opt,name=public_ip,json=publicIp,proto3" json:"public_ip,omitempty"`
	VpnIp     string `protobuf:"bytes,2,opt,name=vpn_ip,json=vpnIp,proto3" json:"vpn_ip,omitempty"`
	VpnPubKey []byte `protobuf:"bytes,3,opt,name=vpn_pub_key,json=vpnPubKey,proto3" json:"vpn_pub_key,omitempty"`
	Role      uint32 `protobuf:"varint,4,opt,name=role,proto3" json:"role,omitempty"`
}

func (x *Peer) Reset() {
	*x = Peer{}
	if protoimpl.UnsafeEnabled {
		mi := &file_vpnapi_proto_msgTypes[6]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *Peer) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*Peer) ProtoMessage() {}

func (x *Peer) ProtoReflect() protoreflect.Message {
	mi := &file_vpnapi_proto_msgTypes[6]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use Peer.ProtoReflect.Descriptor instead.
func (*Peer) Descriptor() ([]byte, []int) {
	return file_vpnapi_proto_rawDescGZIP(), []int{6}
}

func (x *Peer) GetPublicIp() string {
	if x != nil {
		return x.PublicIp
	}
	return ""
}

func (x *Peer) GetVpnIp() string {
	if x != nil {
		return x.VpnIp
	}
	return ""
}

func (x *Peer) GetVpnPubKey() []byte {
	if x != nil {
		return x.VpnPubKey
	}
	return nil
}

func (x *Peer) GetRole() uint32 {
	if x != nil {
		return x.Role
	}
	return 0
}

type GetDataKeyRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	DataKeyId string `protobuf:"bytes,1,opt,name=data_key_id,json=dataKeyId,proto3" json:"data_key_id,omitempty"`
	Length    uint32 `protobuf:"varint,2,opt,name=length,proto3" json:"length,omitempty"`
}

func (x *GetDataKeyRequest) Reset() {
	*x = GetDataKeyRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_vpnapi_proto_msgTypes[7]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *GetDataKeyRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*GetDataKeyRequest) ProtoMessage() {}

func (x *GetDataKeyRequest) ProtoReflect() protoreflect.Message {
	mi := &file_vpnapi_proto_msgTypes[7]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GetDataKeyRequest.ProtoReflect.Descriptor instead.
func (*GetDataKeyRequest) Descriptor() ([]byte, []int) {
	return file_vpnapi_proto_rawDescGZIP(), []int{7}
}

func (x *GetDataKeyRequest) GetDataKeyId() string {
	if x != nil {
		return x.DataKeyId
	}
	return ""
}

func (x *GetDataKeyRequest) GetLength() uint32 {
	if x != nil {
		return x.Length
	}
	return 0
}

type GetDataKeyResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	DataKey []byte `protobuf:"bytes,1,opt,name=data_key,json=dataKey,proto3" json:"data_key,omitempty"`
}

func (x *GetDataKeyResponse) Reset() {
	*x = GetDataKeyResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_vpnapi_proto_msgTypes[8]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *GetDataKeyResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*GetDataKeyResponse) ProtoMessage() {}

func (x *GetDataKeyResponse) ProtoReflect() protoreflect.Message {
	mi := &file_vpnapi_proto_msgTypes[8]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GetDataKeyResponse.ProtoReflect.Descriptor instead.
func (*GetDataKeyResponse) Descriptor() ([]byte, []int) {
	return file_vpnapi_proto_rawDescGZIP(), []int{8}
}

func (x *GetDataKeyResponse) GetDataKey() []byte {
	if x != nil {
		return x.DataKey
	}
	return nil
}

var File_vpnapi_proto protoreflect.FileDescriptor

var file_vpnapi_proto_rawDesc = []byte{
	0x0a, 0x0c, 0x76, 0x70, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06,
	0x76, 0x70, 0x6e, 0x61, 0x70, 0x69, 0x22, 0x3d, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x55, 0x70, 0x64,
	0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65,
	0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01,
	0x20, 0x01, 0x28, 0x03, 0x52, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x56, 0x65,
	0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x62, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x55, 0x70, 0x64, 0x61,
	0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65,
	0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01,
	0x20, 0x01, 0x28, 0x03, 0x52, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x56, 0x65,
	0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x05, 0x70, 0x65, 0x65, 0x72, 0x73, 0x18, 0x02,
	0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x70, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x65,
	0x65, 0x72, 0x52, 0x05, 0x70, 0x65, 0x65, 0x72, 0x73, 0x22, 0x17, 0x0a, 0x15, 0x47, 0x65, 0x74,
	0x4b, 0x38, 0x73, 0x4a, 0x6f, 0x69, 0x6e, 0x41, 0x72, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
	0x73, 0x74, 0x22, 0x9e, 0x01, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x4b, 0x38, 0x73, 0x4a, 0x6f, 0x69,
	0x6e, 0x41, 0x72, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2e, 0x0a,
	0x13, 0x61, 0x70, 0x69, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x64, 0x70,
	0x6f, 0x69, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x61, 0x70, 0x69, 0x53,
	0x65, 0x72, 0x76, 0x65, 0x72, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x14, 0x0a,
	0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f,
	0x6b, 0x65, 0x6e, 0x12, 0x3e, 0x0a, 0x1c, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79,
	0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x63, 0x61, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x68,
	0x61, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x69, 0x73, 0x63, 0x6f,
	0x76, 0x65, 0x72, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x43, 0x61, 0x43, 0x65, 0x72, 0x74, 0x48,
	0x61, 0x73, 0x68, 0x22, 0x1d, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x4b, 0x38, 0x73, 0x43, 0x65, 0x72,
	0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65,
	0x73, 0x74, 0x22, 0x46, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x4b, 0x38, 0x73, 0x43, 0x65, 0x72, 0x74,
	0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
	0x73, 0x65, 0x12, 0x26, 0x0a, 0x0e, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
	0x65, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x65, 0x72, 0x74,
	0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x22, 0x6e, 0x0a, 0x04, 0x50, 0x65,
	0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x69, 0x70, 0x18,
	0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x49, 0x70, 0x12,
	0x15, 0x0a, 0x06, 0x76, 0x70, 0x6e, 0x5f, 0x69, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
	0x05, 0x76, 0x70, 0x6e, 0x49, 0x70, 0x12, 0x1e, 0x0a, 0x0b, 0x76, 0x70, 0x6e, 0x5f, 0x70, 0x75,
	0x62, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x76, 0x70, 0x6e,
	0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x04,
	0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x22, 0x4b, 0x0a, 0x11, 0x47, 0x65,
	0x74, 0x44, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
	0x1e, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01,
	0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x49, 0x64, 0x12,
	0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52,
	0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x22, 0x2f, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x44, 0x61,
	0x74, 0x61, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x19, 0x0a,
	0x08, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52,
	0x07, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x32, 0xc0, 0x02, 0x0a, 0x03, 0x41, 0x50, 0x49,
	0x12, 0x40, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x18, 0x2e,
	0x76, 0x70, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x65, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65,
	0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x76, 0x70, 0x6e, 0x61, 0x70, 0x69,
	0x2e, 0x47, 0x65, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
	0x73, 0x65, 0x12, 0x4f, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x4b, 0x38, 0x73, 0x4a, 0x6f, 0x69, 0x6e,
	0x41, 0x72, 0x67, 0x73, 0x12, 0x1d, 0x2e, 0x76, 0x70, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x65,
	0x74, 0x4b, 0x38, 0x73, 0x4a, 0x6f, 0x69, 0x6e, 0x41, 0x72, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75,
	0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x70, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x65, 0x74,
	0x4b, 0x38, 0x73, 0x4a, 0x6f, 0x69, 0x6e, 0x41, 0x72, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
	0x6e, 0x73, 0x65, 0x12, 0x61, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x4b, 0x38, 0x73, 0x43, 0x65, 0x72,
	0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x23, 0x2e, 0x76, 0x70,
	0x6e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x38, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69,
	0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
	0x1a, 0x24, 0x2e, 0x76, 0x70, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x38, 0x73,
	0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x52, 0x65,
	0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x43, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x44, 0x61, 0x74,
	0x61, 0x4b, 0x65, 0x79, 0x12, 0x19, 0x2e, 0x76, 0x70, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x65,
	0x74, 0x44, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
	0x1a, 0x2e, 0x76, 0x70, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61,
	0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x42, 0x5a, 0x40, 0x67,
	0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x64, 0x67, 0x65, 0x6c, 0x65,
	0x73, 0x73, 0x73, 0x79, 0x73, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x65, 0x6c, 0x6c, 0x61, 0x74,
	0x69, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2f,
	0x76, 0x70, 0x6e, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x70, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
	0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}

var (
	file_vpnapi_proto_rawDescOnce sync.Once
	file_vpnapi_proto_rawDescData = file_vpnapi_proto_rawDesc
)

func file_vpnapi_proto_rawDescGZIP() []byte {
	file_vpnapi_proto_rawDescOnce.Do(func() {
		file_vpnapi_proto_rawDescData = protoimpl.X.CompressGZIP(file_vpnapi_proto_rawDescData)
	})
	return file_vpnapi_proto_rawDescData
}

var file_vpnapi_proto_msgTypes = make([]protoimpl.MessageInfo, 9)
var file_vpnapi_proto_goTypes = []interface{}{
	(*GetUpdateRequest)(nil),             // 0: vpnapi.GetUpdateRequest
	(*GetUpdateResponse)(nil),            // 1: vpnapi.GetUpdateResponse
	(*GetK8SJoinArgsRequest)(nil),        // 2: vpnapi.GetK8sJoinArgsRequest
	(*GetK8SJoinArgsResponse)(nil),       // 3: vpnapi.GetK8sJoinArgsResponse
	(*GetK8SCertificateKeyRequest)(nil),  // 4: vpnapi.GetK8sCertificateKeyRequest
	(*GetK8SCertificateKeyResponse)(nil), // 5: vpnapi.GetK8sCertificateKeyResponse
	(*Peer)(nil),                         // 6: vpnapi.Peer
	(*GetDataKeyRequest)(nil),            // 7: vpnapi.GetDataKeyRequest
	(*GetDataKeyResponse)(nil),           // 8: vpnapi.GetDataKeyResponse
}
var file_vpnapi_proto_depIdxs = []int32{
	6, // 0: vpnapi.GetUpdateResponse.peers:type_name -> vpnapi.Peer
	0, // 1: vpnapi.API.GetUpdate:input_type -> vpnapi.GetUpdateRequest
	2, // 2: vpnapi.API.GetK8sJoinArgs:input_type -> vpnapi.GetK8sJoinArgsRequest
	4, // 3: vpnapi.API.GetK8sCertificateKey:input_type -> vpnapi.GetK8sCertificateKeyRequest
	7, // 4: vpnapi.API.GetDataKey:input_type -> vpnapi.GetDataKeyRequest
	1, // 5: vpnapi.API.GetUpdate:output_type -> vpnapi.GetUpdateResponse
	3, // 6: vpnapi.API.GetK8sJoinArgs:output_type -> vpnapi.GetK8sJoinArgsResponse
	5, // 7: vpnapi.API.GetK8sCertificateKey:output_type -> vpnapi.GetK8sCertificateKeyResponse
	8, // 8: vpnapi.API.GetDataKey:output_type -> vpnapi.GetDataKeyResponse
	5, // [5:9] is the sub-list for method output_type
	1, // [1:5] is the sub-list for method input_type
	1, // [1:1] is the sub-list for extension type_name
	1, // [1:1] is the sub-list for extension extendee
	0, // [0:1] is the sub-list for field type_name
}

func init() { file_vpnapi_proto_init() }
func file_vpnapi_proto_init() {
	if File_vpnapi_proto != nil {
		return
	}
	if !protoimpl.UnsafeEnabled {
		file_vpnapi_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*GetUpdateRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_vpnapi_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*GetUpdateResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_vpnapi_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*GetK8SJoinArgsRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_vpnapi_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*GetK8SJoinArgsResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_vpnapi_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*GetK8SCertificateKeyRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_vpnapi_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*GetK8SCertificateKeyResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_vpnapi_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Peer); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_vpnapi_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*GetDataKeyRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_vpnapi_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*GetDataKeyResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_vpnapi_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 9,
|
||||
NumExtensions: 0,
|
||||
NumServices: 1,
|
||||
},
|
||||
GoTypes: file_vpnapi_proto_goTypes,
|
||||
DependencyIndexes: file_vpnapi_proto_depIdxs,
|
||||
MessageInfos: file_vpnapi_proto_msgTypes,
|
||||
}.Build()
|
||||
File_vpnapi_proto = out.File
|
||||
file_vpnapi_proto_rawDesc = nil
|
||||
file_vpnapi_proto_goTypes = nil
|
||||
file_vpnapi_proto_depIdxs = nil
|
||||
}
@ -1,53 +0,0 @@
syntax = "proto3";

package vpnapi;

option go_package = "github.com/edgelesssys/constellation/coordinator/vpnapi/vpnproto";

service API {
    rpc GetUpdate(GetUpdateRequest) returns (GetUpdateResponse);
    rpc GetK8sJoinArgs(GetK8sJoinArgsRequest) returns (GetK8sJoinArgsResponse);
    rpc GetK8sCertificateKey(GetK8sCertificateKeyRequest) returns (GetK8sCertificateKeyResponse);
    rpc GetDataKey(GetDataKeyRequest) returns (GetDataKeyResponse);
}

message GetUpdateRequest {
    int64 resource_version = 1;
}

message GetUpdateResponse {
    int64 resource_version = 1;
    repeated Peer peers = 2;
}

message GetK8sJoinArgsRequest {
}

message GetK8sJoinArgsResponse {
    string api_server_endpoint = 1;
    string token = 2;
    string discovery_token_ca_cert_hash = 3;
}

message GetK8sCertificateKeyRequest {
}

message GetK8sCertificateKeyResponse {
    string certificateKey = 1;
}

message Peer {
    string public_ip = 1;
    string vpn_ip = 2;
    bytes vpn_pub_key = 3;
    uint32 role = 4;
}

message GetDataKeyRequest {
    string data_key_id = 1;
    uint32 length = 2;
}

message GetDataKeyResponse {
    bytes data_key = 1;
}
@ -1,213 +0,0 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.2.0
// - protoc v3.20.1
// source: vpnapi.proto

package vpnproto

import (
	context "context"
	grpc "google.golang.org/grpc"
	codes "google.golang.org/grpc/codes"
	status "google.golang.org/grpc/status"
)

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7

// APIClient is the client API for API service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type APIClient interface {
	GetUpdate(ctx context.Context, in *GetUpdateRequest, opts ...grpc.CallOption) (*GetUpdateResponse, error)
	GetK8SJoinArgs(ctx context.Context, in *GetK8SJoinArgsRequest, opts ...grpc.CallOption) (*GetK8SJoinArgsResponse, error)
	GetK8SCertificateKey(ctx context.Context, in *GetK8SCertificateKeyRequest, opts ...grpc.CallOption) (*GetK8SCertificateKeyResponse, error)
	GetDataKey(ctx context.Context, in *GetDataKeyRequest, opts ...grpc.CallOption) (*GetDataKeyResponse, error)
}

type aPIClient struct {
	cc grpc.ClientConnInterface
}

func NewAPIClient(cc grpc.ClientConnInterface) APIClient {
	return &aPIClient{cc}
}

func (c *aPIClient) GetUpdate(ctx context.Context, in *GetUpdateRequest, opts ...grpc.CallOption) (*GetUpdateResponse, error) {
	out := new(GetUpdateResponse)
	err := c.cc.Invoke(ctx, "/vpnapi.API/GetUpdate", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *aPIClient) GetK8SJoinArgs(ctx context.Context, in *GetK8SJoinArgsRequest, opts ...grpc.CallOption) (*GetK8SJoinArgsResponse, error) {
	out := new(GetK8SJoinArgsResponse)
	err := c.cc.Invoke(ctx, "/vpnapi.API/GetK8sJoinArgs", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *aPIClient) GetK8SCertificateKey(ctx context.Context, in *GetK8SCertificateKeyRequest, opts ...grpc.CallOption) (*GetK8SCertificateKeyResponse, error) {
	out := new(GetK8SCertificateKeyResponse)
	err := c.cc.Invoke(ctx, "/vpnapi.API/GetK8sCertificateKey", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *aPIClient) GetDataKey(ctx context.Context, in *GetDataKeyRequest, opts ...grpc.CallOption) (*GetDataKeyResponse, error) {
	out := new(GetDataKeyResponse)
	err := c.cc.Invoke(ctx, "/vpnapi.API/GetDataKey", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// APIServer is the server API for API service.
// All implementations must embed UnimplementedAPIServer
// for forward compatibility
type APIServer interface {
	GetUpdate(context.Context, *GetUpdateRequest) (*GetUpdateResponse, error)
	GetK8SJoinArgs(context.Context, *GetK8SJoinArgsRequest) (*GetK8SJoinArgsResponse, error)
	GetK8SCertificateKey(context.Context, *GetK8SCertificateKeyRequest) (*GetK8SCertificateKeyResponse, error)
	GetDataKey(context.Context, *GetDataKeyRequest) (*GetDataKeyResponse, error)
	mustEmbedUnimplementedAPIServer()
}

// UnimplementedAPIServer must be embedded to have forward compatible implementations.
type UnimplementedAPIServer struct {
}

func (UnimplementedAPIServer) GetUpdate(context.Context, *GetUpdateRequest) (*GetUpdateResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method GetUpdate not implemented")
}
func (UnimplementedAPIServer) GetK8SJoinArgs(context.Context, *GetK8SJoinArgsRequest) (*GetK8SJoinArgsResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method GetK8SJoinArgs not implemented")
}
func (UnimplementedAPIServer) GetK8SCertificateKey(context.Context, *GetK8SCertificateKeyRequest) (*GetK8SCertificateKeyResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method GetK8SCertificateKey not implemented")
}
func (UnimplementedAPIServer) GetDataKey(context.Context, *GetDataKeyRequest) (*GetDataKeyResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method GetDataKey not implemented")
}
func (UnimplementedAPIServer) mustEmbedUnimplementedAPIServer() {}

// UnsafeAPIServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to APIServer will
// result in compilation errors.
type UnsafeAPIServer interface {
	mustEmbedUnimplementedAPIServer()
}

func RegisterAPIServer(s grpc.ServiceRegistrar, srv APIServer) {
	s.RegisterService(&API_ServiceDesc, srv)
}

func _API_GetUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(GetUpdateRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(APIServer).GetUpdate(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server: srv,
		FullMethod: "/vpnapi.API/GetUpdate",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(APIServer).GetUpdate(ctx, req.(*GetUpdateRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _API_GetK8SJoinArgs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(GetK8SJoinArgsRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(APIServer).GetK8SJoinArgs(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server: srv,
		FullMethod: "/vpnapi.API/GetK8sJoinArgs",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(APIServer).GetK8SJoinArgs(ctx, req.(*GetK8SJoinArgsRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _API_GetK8SCertificateKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(GetK8SCertificateKeyRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(APIServer).GetK8SCertificateKey(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server: srv,
		FullMethod: "/vpnapi.API/GetK8sCertificateKey",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(APIServer).GetK8SCertificateKey(ctx, req.(*GetK8SCertificateKeyRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _API_GetDataKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(GetDataKeyRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(APIServer).GetDataKey(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server: srv,
		FullMethod: "/vpnapi.API/GetDataKey",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(APIServer).GetDataKey(ctx, req.(*GetDataKeyRequest))
	}
	return interceptor(ctx, in, info, handler)
}

// API_ServiceDesc is the grpc.ServiceDesc for API service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var API_ServiceDesc = grpc.ServiceDesc{
	ServiceName: "vpnapi.API",
	HandlerType: (*APIServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "GetUpdate",
			Handler:    _API_GetUpdate_Handler,
		},
		{
			MethodName: "GetK8sJoinArgs",
			Handler:    _API_GetK8SJoinArgs_Handler,
		},
		{
			MethodName: "GetK8sCertificateKey",
			Handler:    _API_GetK8SCertificateKey_Handler,
		},
		{
			MethodName: "GetDataKey",
			Handler:    _API_GetDataKey_Handler,
		},
	},
	Streams:  []grpc.StreamDesc{},
	Metadata: "vpnapi.proto",
}
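For orientation, a minimal sketch of how a caller would have driven this generated client before its removal; the endpoint address, the data key ID, and the use of insecure transport credentials are illustrative assumptions, not taken from the commit:

package main

import (
	"context"
	"fmt"

	"github.com/edgelesssys/constellation/coordinator/vpnapi/vpnproto"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Dial a coordinator on a hypothetical VPN address.
	conn, err := grpc.Dial("10.118.0.1:9027", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// Request a 32-byte data key derived for the given (made-up) key ID.
	client := vpnproto.NewAPIClient(conn)
	resp, err := client.GetDataKey(context.Background(), &vpnproto.GetDataKeyRequest{DataKeyId: "volume-1", Length: 32})
	if err != nil {
		panic(err)
	}
	fmt.Printf("received %d key bytes\n", len(resp.DataKey))
}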
@ -1,247 +0,0 @@
package wireguard

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"net"
	"os"
	"time"

	"github.com/edgelesssys/constellation/coordinator/peer"
	"github.com/edgelesssys/constellation/coordinator/util"
	"github.com/vishvananda/netlink"
	"golang.zx2c4.com/wireguard/wgctrl"
	"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
)

const (
	netInterface = "wg0"
	port         = 51820
)

type Wireguard struct {
	client wgClient
}

func New() (*Wireguard, error) {
	client, err := wgctrl.New()
	if err != nil {
		return nil, err
	}
	return &Wireguard{client: client}, nil
}

func (w *Wireguard) Setup(privKey []byte) error {
	var key wgtypes.Key
	var err error
	if len(privKey) == 0 {
		key, err = wgtypes.GeneratePrivateKey()
	} else {
		key, err = wgtypes.NewKey(privKey)
	}
	if err != nil {
		return err
	}
	listenPort := port
	return w.client.ConfigureDevice(netInterface, wgtypes.Config{PrivateKey: &key, ListenPort: &listenPort})
}

// GetPrivateKey returns the private key of the wireguard interface.
func (w *Wireguard) GetPrivateKey() ([]byte, error) {
	device, err := w.client.Device(netInterface)
	if err != nil {
		return nil, fmt.Errorf("retrieving wireguard private key from device %v: %w", netInterface, err)
	}
	return device.PrivateKey[:], nil
}

func (w *Wireguard) DerivePublicKey(privKey []byte) ([]byte, error) {
	key, err := wgtypes.NewKey(privKey)
	if err != nil {
		return nil, err
	}
	pubkey := key.PublicKey()
	return pubkey[:], nil
}

func (w *Wireguard) GetPublicKey() ([]byte, error) {
	deviceData, err := w.client.Device(netInterface)
	if err != nil {
		return nil, err
	}
	return deviceData.PublicKey[:], nil
}

func (w *Wireguard) GetInterfaceIP() (string, error) {
	return util.GetInterfaceIP(netInterface)
}

// SetInterfaceIP sets the interface IP.
func (w *Wireguard) SetInterfaceIP(ip string) error {
	addr, err := netlink.ParseAddr(ip + "/16")
	if err != nil {
		return err
	}
	link, err := netlink.LinkByName(netInterface)
	if err != nil {
		return err
	}
	if err := netlink.AddrAdd(link, addr); err != nil {
		return err
	}
	return netlink.LinkSetUp(link)
}

// AddPeer adds a new peer to a wireguard interface.
func (w *Wireguard) AddPeer(pubKey []byte, publicIP string, vpnIP string) error {
	_, allowedIPs, err := net.ParseCIDR(vpnIP + "/32")
	if err != nil {
		return err
	}

	key, err := wgtypes.NewKey(pubKey)
	if err != nil {
		return err
	}

	var endpoint *net.UDPAddr
	if ip := net.ParseIP(publicIP); ip != nil {
		endpoint = &net.UDPAddr{IP: ip, Port: port}
	}

	keepAlive := 10 * time.Second
	cfg := wgtypes.Config{
		ReplacePeers: false,
		Peers: []wgtypes.PeerConfig{
			{
				PublicKey:                   key,
				UpdateOnly:                  false,
				Endpoint:                    endpoint,
				AllowedIPs:                  []net.IPNet{*allowedIPs},
				PersistentKeepaliveInterval: &keepAlive,
			},
		},
	}

	return prettyWgError(w.client.ConfigureDevice(netInterface, cfg))
}

// RemovePeer removes a peer from the wireguard interface.
func (w *Wireguard) RemovePeer(pubKey []byte) error {
	key, err := wgtypes.NewKey(pubKey)
	if err != nil {
		return err
	}

	cfg := wgtypes.Config{Peers: []wgtypes.PeerConfig{{PublicKey: key, Remove: true}}}

	return prettyWgError(w.client.ConfigureDevice(netInterface, cfg))
}

func prettyWgError(err error) error {
	if errors.Is(err, os.ErrNotExist) {
		return errors.New("interface not found or is not a WireGuard interface")
	}
	return err
}

func (w *Wireguard) UpdatePeers(peers []peer.Peer) error {
	wgPeers, err := transformToWgpeer(peers)
	if err != nil {
		return fmt.Errorf("transforming peers to wireguard-peers: %w", err)
	}

	deviceData, err := w.client.Device(netInterface)
	if err != nil {
		return fmt.Errorf("obtaining device data: %w", err)
	}
	// convert to map for easier lookup
	storePeers := make(map[string]wgtypes.Peer)
	for _, p := range wgPeers {
		storePeers[p.AllowedIPs[0].String()] = p
	}
	var added []wgtypes.Peer
	var removed []wgtypes.Peer

	for _, interfacePeer := range deviceData.Peers {
		if updPeer, ok := storePeers[interfacePeer.AllowedIPs[0].String()]; ok {
			if !bytes.Equal(updPeer.PublicKey[:], interfacePeer.PublicKey[:]) {
				added = append(added, updPeer)
				removed = append(removed, interfacePeer)
			}
			delete(storePeers, updPeer.AllowedIPs[0].String())
		} else {
			removed = append(removed, interfacePeer)
		}
	}
	// remaining store peers are new ones
	for _, peer := range storePeers {
		added = append(added, peer)
	}

	keepAlive := 10 * time.Second
	var newPeerConfig []wgtypes.PeerConfig
	for _, peer := range removed {
		newPeerConfig = append(newPeerConfig, wgtypes.PeerConfig{
			// the public key is enough to match the peer for removal
			PublicKey: peer.PublicKey,
			Remove:    true,
		})
	}
	for _, peer := range added {
		newPeerConfig = append(newPeerConfig, wgtypes.PeerConfig{
			PublicKey:  peer.PublicKey,
			Remove:     false,
			UpdateOnly: false,
			Endpoint:   peer.Endpoint,
			AllowedIPs: peer.AllowedIPs,
			// needed, otherwise gRPC has problems establishing the initial connection.
			PersistentKeepaliveInterval: &keepAlive,
		})
	}
	if len(newPeerConfig) == 0 {
		return nil
	}
	cfg := wgtypes.Config{
		ReplacePeers: false,
		Peers:        newPeerConfig,
	}
	return prettyWgError(w.client.ConfigureDevice(netInterface, cfg))
}

func (w *Wireguard) Close() error {
	return w.client.Close()
}

// A wgClient is a type which can control a WireGuard device.
type wgClient interface {
	io.Closer
	Device(name string) (*wgtypes.Device, error)
	ConfigureDevice(name string, cfg wgtypes.Config) error
}

func transformToWgpeer(corePeers []peer.Peer) ([]wgtypes.Peer, error) {
	var wgPeers []wgtypes.Peer
	for _, peer := range corePeers {
		key, err := wgtypes.NewKey(peer.VPNPubKey)
		if err != nil {
			return nil, err
		}
		_, allowedIPs, err := net.ParseCIDR(peer.VPNIP + "/32")
		if err != nil {
			return nil, err
		}
		var endpoint *net.UDPAddr
		if ip := net.ParseIP(peer.PublicIP); ip != nil {
			endpoint = &net.UDPAddr{IP: ip, Port: port}
		}
		wgPeers = append(wgPeers, wgtypes.Peer{
			PublicKey:  key,
			Endpoint:   endpoint,
			AllowedIPs: []net.IPNet{*allowedIPs},
		})
	}
	return wgPeers, nil
}
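As a reading aid, a minimal sketch of the lifecycle this type supported, assuming a wg0 device already exists on the host; the addresses and the peer key below are made up for illustration:

package main

import (
	"github.com/edgelesssys/constellation/coordinator/wireguard"
	"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
)

func main() {
	wg, err := wireguard.New()
	if err != nil {
		panic(err)
	}
	defer wg.Close()

	// Passing nil makes Setup generate a fresh private key for wg0.
	if err := wg.Setup(nil); err != nil {
		panic(err)
	}
	if err := wg.SetInterfaceIP("10.118.0.1"); err != nil {
		panic(err)
	}

	// Register one peer; in practice the public key comes from the peer itself.
	peerKey, _ := wgtypes.GeneratePrivateKey()
	pub := peerKey.PublicKey()
	if err := wg.AddPeer(pub[:], "203.0.113.10", "10.118.0.2"); err != nil {
		panic(err)
	}
}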
@ -1,157 +0,0 @@
package wireguard

import (
	"errors"
	"testing"

	"github.com/edgelesssys/constellation/coordinator/peer"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"go.uber.org/goleak"
	"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
)

func TestMain(m *testing.M) {
	goleak.VerifyTestMain(m)
}

func TestUpdatePeer(t *testing.T) {
	requirePre := require.New(t)

	firstKey, err := wgtypes.GenerateKey()
	requirePre.NoError(err)
	peer1 := peer.Peer{PublicIP: "192.0.2.11", VPNIP: "192.0.2.21", VPNPubKey: firstKey[:]}
	firstKeyUpd, err := wgtypes.GenerateKey()
	requirePre.NoError(err)
	peer1KeyUpd := peer.Peer{PublicIP: "192.0.2.11", VPNIP: "192.0.2.21", VPNPubKey: firstKeyUpd[:]}
	secondKey, err := wgtypes.GenerateKey()
	requirePre.NoError(err)
	peer2 := peer.Peer{PublicIP: "192.0.2.12", VPNIP: "192.0.2.22", VPNPubKey: secondKey[:]}
	thirdKey, err := wgtypes.GenerateKey()
	requirePre.NoError(err)
	peer3 := peer.Peer{PublicIP: "192.0.2.13", VPNIP: "192.0.2.23", VPNPubKey: thirdKey[:]}
	fourthKey, err := wgtypes.GenerateKey()
	requirePre.NoError(err)
	peerAdmin := peer.Peer{PublicIP: "192.0.2.10", VPNIP: "192.0.2.25", VPNPubKey: fourthKey[:]}
	peerAdminNoEndp := peer.Peer{VPNIP: "192.0.2.25", VPNPubKey: fourthKey[:]}

	checkError := func(peers []wgtypes.Peer, err error) []wgtypes.Peer {
		requirePre.NoError(err)
		return peers
	}

	testCases := map[string]struct {
		storePeers   []peer.Peer
		vpnPeers     []wgtypes.Peer
		excludedIP   map[string]struct{}
		wantErr      bool
		wantVPNPeers []wgtypes.Peer
	}{
		"basic": {
			storePeers:   []peer.Peer{peer1, peer3},
			vpnPeers:     checkError(transformToWgpeer([]peer.Peer{peer1, peer2})),
			wantVPNPeers: checkError(transformToWgpeer([]peer.Peer{peer1, peer3})),
		},
		"previously empty": {
			storePeers:   []peer.Peer{peer1, peer2},
			wantVPNPeers: checkError(transformToWgpeer([]peer.Peer{peer1, peer2})),
		},
		"no changes": {
			storePeers:   []peer.Peer{peer1, peer2},
			vpnPeers:     checkError(transformToWgpeer([]peer.Peer{peer1, peer2})),
			wantVPNPeers: checkError(transformToWgpeer([]peer.Peer{peer1, peer2})),
		},
		"key update": {
			storePeers:   []peer.Peer{peer1KeyUpd, peer3},
			vpnPeers:     checkError(transformToWgpeer([]peer.Peer{peer1, peer2})),
			wantVPNPeers: checkError(transformToWgpeer([]peer.Peer{peer1KeyUpd, peer3})),
		},
		"not update Endpoint changes": {
			storePeers:   []peer.Peer{peerAdminNoEndp, peer3},
			vpnPeers:     checkError(transformToWgpeer([]peer.Peer{peerAdmin, peer3})),
			wantVPNPeers: checkError(transformToWgpeer([]peer.Peer{peerAdmin, peer3})),
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)
			require := require.New(t)

			fakewg := fakewgClient{}
			fakewg.devices = make(map[string]*wgtypes.Device)
			wg := Wireguard{client: &fakewg}

			fakewg.devices[netInterface] = &wgtypes.Device{Peers: tc.vpnPeers}

			updateErr := wg.UpdatePeers(tc.storePeers)

			if tc.wantErr {
				assert.Error(updateErr)
				return
			}
			require.NoError(updateErr)

			assert.ElementsMatch(tc.wantVPNPeers, fakewg.devices[netInterface].Peers)
		})
	}
}

type fakewgClient struct {
	devices map[string]*wgtypes.Device
}

func (w *fakewgClient) Device(name string) (*wgtypes.Device, error) {
	if val, ok := w.devices[name]; ok {
		return val, nil
	}
	return nil, errors.New("device does not exist")
}

func (w *fakewgClient) ConfigureDevice(name string, cfg wgtypes.Config) error {
	var newPeerList []wgtypes.Peer
	var operation bool
	vpnPeers := make(map[wgtypes.Key]wgtypes.Peer)

	for _, peer := range w.devices[netInterface].Peers {
		vpnPeers[peer.PublicKey] = peer
	}

	for _, configPeer := range cfg.Peers {
		operation = false
		for _, vpnPeer := range w.devices[netInterface].Peers {
			// wireguard matches internally via pubkey
			if vpnPeer.PublicKey == configPeer.PublicKey {
				operation = true
				if configPeer.Remove {
					delete(vpnPeers, vpnPeer.PublicKey)
					continue
				}
				if configPeer.UpdateOnly {
					vpnPeers[vpnPeer.PublicKey] = wgtypes.Peer{
						PublicKey:  vpnPeer.PublicKey,
						AllowedIPs: vpnPeer.AllowedIPs,
						Endpoint:   configPeer.Endpoint,
					}
				}
			}
		}
		if !operation {
			vpnPeers[configPeer.PublicKey] = wgtypes.Peer{
				PublicKey:  configPeer.PublicKey,
				AllowedIPs: configPeer.AllowedIPs,
				Endpoint:   configPeer.Endpoint,
			}
		}
	}
	for _, peer := range vpnPeers {
		newPeerList = append(newPeerList, peer)
	}
	w.devices[netInterface].Peers = newPeerList

	return nil
}

func (w *fakewgClient) Close() error {
	return nil
}
5 go.mod
@ -113,6 +113,11 @@ require (
	k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9
)

require (
	github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
	github.com/russross/blackfriday/v2 v2.1.0 // indirect
)

require (
	cloud.google.com/go v0.100.2 // indirect
	code.cloudfoundry.org/clock v0.0.0-20180518195852-02e53af36e6c // indirect
4 go.sum
@ -504,10 +504,12 @@ github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzA
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
@ -1335,8 +1337,10 @@ github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6po
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/rs/cors v1.8.0/go.mod h1:EBwu+T5AvHOcXwvZIkQFjUN6s8Czyqw12GL/Y0tUyRM=
github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto=
github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
59 internal/cloud/metadata/metadata.go Normal file
@ -0,0 +1,59 @@
package metadata

import (
	"context"
	"errors"
	"fmt"
	"net"
	"strconv"

	"github.com/edgelesssys/constellation/coordinator/role"
	"github.com/edgelesssys/constellation/internal/constants"
)

// InstanceMetadata describes metadata of a peer.
type InstanceMetadata struct {
	Name          string
	ProviderID    string
	Role          role.Role
	PrivateIPs    []string
	PublicIPs     []string
	AliasIPRanges []string
	// SSHKeys maps usernames to ssh public keys.
	SSHKeys map[string][]string
}

type metadataAPI interface {
	// List retrieves all instances belonging to the current constellation.
	List(ctx context.Context) ([]InstanceMetadata, error)
	// Self retrieves the current instance.
	Self(ctx context.Context) (InstanceMetadata, error)
	// SignalRole signals the constellation role via cloud provider metadata (if supported by the CSP and deployment type, otherwise does nothing).
	SignalRole(ctx context.Context, role role.Role) error
	// SetVPNIP stores the internally used VPN IP in cloud provider metadata (if supported and required for autoscaling by the CSP, otherwise does nothing).
	SetVPNIP(ctx context.Context, vpnIP string) error
	// Supported is used to determine if metadata API is implemented for this cloud provider.
	Supported() bool
}

// TODO(katexochen): Rename to InitEndpoints
func CoordinatorEndpoints(ctx context.Context, api metadataAPI) ([]string, error) {
	if !api.Supported() {
		return nil, errors.New("retrieving instances list from cloud provider is not yet supported")
	}
	instances, err := api.List(ctx)
	if err != nil {
		return nil, fmt.Errorf("retrieving instances list from cloud provider: %w", err)
	}
	coordinatorEndpoints := []string{}
	for _, instance := range instances {
		// check if role of instance is "Coordinator"
		if instance.Role == role.Coordinator {
			for _, ip := range instance.PrivateIPs {
				coordinatorEndpoints = append(coordinatorEndpoints, net.JoinHostPort(ip, strconv.Itoa(constants.CoordinatorPort)))
			}
		}
	}

	return coordinatorEndpoints, nil
}
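A minimal sketch of how CoordinatorEndpoints might be driven from a test; stubMetadata and its instance list are hypothetical stand-ins, not part of this commit:

package metadata_test

import (
	"context"
	"fmt"

	"github.com/edgelesssys/constellation/coordinator/role"
	"github.com/edgelesssys/constellation/internal/cloud/metadata"
)

// stubMetadata is a hypothetical in-memory implementation of the metadata API.
type stubMetadata struct{}

func (s stubMetadata) List(ctx context.Context) ([]metadata.InstanceMetadata, error) {
	return []metadata.InstanceMetadata{
		{Role: role.Coordinator, PrivateIPs: []string{"10.118.0.1"}},
		{Role: role.Node, PrivateIPs: []string{"10.118.0.2"}},
	}, nil
}

func (s stubMetadata) Self(ctx context.Context) (metadata.InstanceMetadata, error) {
	return metadata.InstanceMetadata{}, nil
}

func (s stubMetadata) SignalRole(ctx context.Context, r role.Role) error { return nil }
func (s stubMetadata) SetVPNIP(ctx context.Context, vpnIP string) error  { return nil }
func (s stubMetadata) Supported() bool                                   { return true }

func ExampleCoordinatorEndpoints() {
	// Only the coordinator's private IPs end up in the result, joined with the coordinator port.
	endpoints, err := metadata.CoordinatorEndpoints(context.Background(), stubMetadata{})
	if err != nil {
		panic(err)
	}
	fmt.Println(endpoints)
}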
@ -22,7 +22,9 @@ const (
	// Ports.
	//

	ActivationServicePort = 9090
	// ActivationServicePort is the port for reaching the activation service within Kubernetes.
	ActivationServicePort = 9090
	// ActivationServiceNodePort is the port for reaching the activation service outside of Kubernetes.
	ActivationServiceNodePort = 30090
	VerifyServicePortHTTP     = 8080
	VerifyServicePortGRPC     = 9090
@ -1,11 +1,11 @@
package ssh

import (
	"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
	"github.com/edgelesssys/constellation/coordinator/initproto"
)

// FromProtoSlice converts a SSH UserKey definition from pubproto to the Go flavor.
func FromProtoSlice(input []*pubproto.SSHUserKey) []UserKey {
func FromProtoSlice(input []*initproto.SSHUserKey) []UserKey {
	if input == nil {
		return nil
	}
@ -25,14 +25,14 @@ func FromProtoSlice(input []*pubproto.SSHUserKey) []UserKey {
}

// ToProtoSlice converts a SSH UserKey definition from Go to pubproto flavor.
func ToProtoSlice(input []*UserKey) []*pubproto.SSHUserKey {
func ToProtoSlice(input []*UserKey) []*initproto.SSHUserKey {
	if input == nil {
		return nil
	}

	output := make([]*pubproto.SSHUserKey, 0)
	output := make([]*initproto.SSHUserKey, 0)
	for _, pair := range input {
		singlePair := pubproto.SSHUserKey{
		singlePair := initproto.SSHUserKey{
			Username:  pair.Username,
			PublicKey: pair.PublicKey,
		}
@ -9,7 +9,7 @@ import (
	"net"
	"testing"

	"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
	"github.com/edgelesssys/constellation/coordinator/initproto"
	"github.com/edgelesssys/constellation/internal/atls"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
@ -39,7 +39,7 @@ func TestATLSCredentials(t *testing.T) {
	for i := 0; i < serverCount; i++ {
		api := &fakeAPI{}
		server := grpc.NewServer(grpc.Creds(serverCreds))
		pubproto.RegisterAPIServer(server, api)
		initproto.RegisterAPIServer(server, api)

		listener := bufconn.Listen(1024)
		listeners = append(listeners, listener)
@ -66,8 +66,8 @@ func TestATLSCredentials(t *testing.T) {
		require.NoError(err)
		defer conn.Close()

		client := pubproto.NewAPIClient(conn)
		_, err = client.GetState(context.Background(), &pubproto.GetStateRequest{})
		client := initproto.NewAPIClient(conn)
		_, err = client.Init(context.Background(), &initproto.InitRequest{})
	}()
}

@ -112,9 +112,9 @@ type fakeDoc struct {
}

type fakeAPI struct {
	pubproto.UnimplementedAPIServer
	initproto.UnimplementedAPIServer
}

func (f *fakeAPI) GetState(ctx context.Context, in *pubproto.GetStateRequest) (*pubproto.GetStateResponse, error) {
	return &pubproto.GetStateResponse{State: 1}, nil
func (f *fakeAPI) Init(ctx context.Context, in *initproto.InitRequest) (*initproto.InitResponse, error) {
	return &initproto.InitResponse{}, nil
}
63 internal/grpc/retry/retry.go Normal file
@ -0,0 +1,63 @@
package retry

import (
	"context"
	"strings"
	"time"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"k8s.io/utils/clock"
)

type IntervalRetryer struct {
	interval time.Duration
	doer     Doer
	clock    clock.WithTicker
}

func NewIntervalRetryer(doer Doer, interval time.Duration) *IntervalRetryer {
	return &IntervalRetryer{
		interval: interval,
		doer:     doer,
		clock:    clock.RealClock{},
	}
}

func (r *IntervalRetryer) Do(ctx context.Context) error {
	ticker := r.clock.NewTicker(r.interval)
	defer ticker.Stop()

	for {
		err := r.doer.Do(ctx)
		if err == nil {
			return nil
		}

		if !r.serviceIsUnavailable(err) {
			return err
		}

		select {
		case <-ctx.Done(): // TODO(katexochen): is this necessary?
			return ctx.Err()
		case <-ticker.C():
		}
	}
}

func (r *IntervalRetryer) serviceIsUnavailable(err error) bool {
	statusErr, ok := status.FromError(err)
	if !ok {
		return false
	}
	if statusErr.Code() != codes.Unavailable {
		return false
	}
	// ideally we would check the error type directly, but grpc only provides a string
	return strings.HasPrefix(statusErr.Message(), `connection error: desc = "transport: authentication handshake failed`)
}

type Doer interface {
	Do(ctx context.Context) error
}
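A minimal sketch of how this retryer is meant to be driven; the initDoer type, its endpoint field, and the chosen interval are hypothetical stand-ins for whatever wraps a single gRPC attempt:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/edgelesssys/constellation/internal/grpc/retry"
)

// initDoer is a hypothetical Doer wrapping one connection attempt.
type initDoer struct {
	endpoint string
}

// Do performs a single attempt. If it returned the Unavailable
// "authentication handshake failed" error matched above, the retryer
// would sleep one interval and call it again; any other error aborts.
func (d *initDoer) Do(ctx context.Context) error {
	fmt.Println("dialing", d.endpoint)
	return nil // a real implementation would issue the gRPC call here
}

func main() {
	retryer := retry.NewIntervalRetryer(&initDoer{endpoint: "192.0.2.1:9000"}, 5*time.Second)

	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	if err := retryer.Do(ctx); err != nil {
		fmt.Println("init failed:", err)
	}
}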
@ -1,151 +0,0 @@
package statuswaiter

import (
	"context"
	"errors"
	"io"
	"strings"
	"time"

	"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
	"github.com/edgelesssys/constellation/coordinator/state"
	"github.com/edgelesssys/constellation/internal/atls"
	"github.com/edgelesssys/constellation/internal/grpc/atlscredentials"
	"google.golang.org/grpc"
	grpccodes "google.golang.org/grpc/codes"
	grpcstatus "google.golang.org/grpc/status"
)

// Waiter waits for PeerStatusServer to reach a specific state. The waiter needs
// to be initialized before usage.
type Waiter struct {
	initialized bool
	interval    time.Duration
	newConn     func(ctx context.Context, target string, opts ...grpc.DialOption) (ClientConn, error)
	newClient   func(cc grpc.ClientConnInterface) pubproto.APIClient
}

// New returns a default Waiter with a probing interval of 10 seconds,
// attested gRPC connection and PeerStatusClient.
func New() *Waiter {
	return &Waiter{
		interval:  10 * time.Second,
		newClient: pubproto.NewAPIClient,
	}
}

// InitializeValidators initializes the validators for the attestation.
func (w *Waiter) InitializeValidators(validators []atls.Validator) error {
	if len(validators) == 0 {
		return errors.New("no validators provided to initialize status waiter")
	}
	w.newConn = newAttestedConnGenerator(validators)
	w.initialized = true
	return nil
}

// WaitFor waits for a PeerStatusServer, which is reachable under the given endpoint
// to reach the specified state.
func (w *Waiter) WaitFor(ctx context.Context, endpoint string, status ...state.State) error {
	if !w.initialized {
		return errors.New("waiter not initialized")
	}

	ticker := time.NewTicker(w.interval)
	defer ticker.Stop()

	// Check once before waiting
	resp, err := w.probe(ctx, endpoint)
	if err != nil && (grpcstatus.Code(err) != grpccodes.Unavailable || isGRPCHandshakeError(err)) {
		return err
	}
	if resp != nil && containsState(state.State(resp.State), status...) {
		return nil
	}

	// Periodically check status again
	for {
		select {
		case <-ticker.C:
			resp, err := w.probe(ctx, endpoint)
			if grpcstatus.Code(err) == grpccodes.Unavailable && !isGRPCHandshakeError(err) {
				// The server isn't reachable yet.
				continue
			}
			if err != nil {
				return err
			}
			if containsState(state.State(resp.State), status...) {
				return nil
			}
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}

// probe sends a PeerStatusCheck request to a PeerStatusServer and returns the response.
func (w *Waiter) probe(ctx context.Context, endpoint string) (*pubproto.GetStateResponse, error) {
	conn, err := w.newConn(ctx, endpoint)
	if err != nil {
		return nil, err
	}
	defer conn.Close()

	client := w.newClient(conn)
	return client.GetState(ctx, &pubproto.GetStateRequest{})
}

// WaitForAll waits for a list of PeerStatusServers, which listen on the handed
// endpoints, to reach the specified state.
func (w *Waiter) WaitForAll(ctx context.Context, endpoints []string, status ...state.State) error {
	if !w.initialized {
		return errors.New("waiter not initialized")
	}

	for _, endpoint := range endpoints {
		if err := w.WaitFor(ctx, endpoint, status...); err != nil {
			return err
		}
	}
	return nil
}

// newAttestedConnGenerator creates a function returning a default attested grpc connection.
func newAttestedConnGenerator(validators []atls.Validator) func(ctx context.Context, target string, opts ...grpc.DialOption) (ClientConn, error) {
	return func(ctx context.Context, target string, opts ...grpc.DialOption) (ClientConn, error) {
		creds := atlscredentials.New(nil, validators)

		return grpc.DialContext(
			ctx, target, grpc.WithTransportCredentials(creds),
		)
	}
}

// ClientConn is the gRPC connection a PeerStatusClient uses to connect to a server.
type ClientConn interface {
	grpc.ClientConnInterface
	io.Closer
}

// containsState checks if current state is one of the given states.
func containsState(s state.State, states ...state.State) bool {
	for _, state := range states {
		if state == s {
			return true
		}
	}
	return false
}

func isGRPCHandshakeError(err error) bool {
	statusErr, ok := grpcstatus.FromError(err)
	if !ok {
		return false
	}
	if statusErr.Code() != grpccodes.Unavailable {
		return false
	}
	// ideally we would check the error type directly, but grpc only provides a string
	return strings.HasPrefix(statusErr.Message(), `connection error: desc = "transport: authentication handshake failed`)
}
@ -1,312 +0,0 @@
package statuswaiter

import (
	"context"
	"errors"
	"net"
	"testing"
	"time"

	"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
	"github.com/edgelesssys/constellation/coordinator/state"
	"github.com/edgelesssys/constellation/internal/atls"
	"github.com/edgelesssys/constellation/internal/grpc/atlscredentials"
	"github.com/edgelesssys/constellation/internal/oid"
	"github.com/stretchr/testify/assert"
	"go.uber.org/goleak"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"google.golang.org/grpc/test/bufconn"
)

func TestMain(m *testing.M) {
	goleak.VerifyTestMain(m)
}

func TestInitializeValidators(t *testing.T) {
	assert := assert.New(t)

	waiter := Waiter{
		interval:  time.Millisecond,
		newClient: stubNewClientFunc(&stubPeerStatusClient{state: state.IsNode}),
	}

	// Uninitialized waiter fails.
	assert.Error(waiter.WaitFor(context.Background(), "someIP", state.IsNode))

	// Initializing waiter with no validators fails
	assert.Error(waiter.InitializeValidators(nil))

	// Initialized waiter succeeds
	assert.NoError(waiter.InitializeValidators(atls.NewFakeValidators(oid.Dummy{})))
	assert.NoError(waiter.WaitFor(context.Background(), "someIP", state.IsNode))
}

func TestWaitForAndWaitForAll(t *testing.T) {
	var noErr error
	someErr := errors.New("failed")
	handshakeErr := status.Error(codes.Unavailable, `connection error: desc = "transport: authentication handshake failed"`)

	testCases := map[string]struct {
		waiter       Waiter
		waitForState []state.State
		wantErr      bool
	}{
		"successful wait": {
			waiter: Waiter{
				initialized: true,
				interval:    time.Millisecond,
				newConn:     stubNewConnFunc(noErr),
				newClient:   stubNewClientFunc(&stubPeerStatusClient{state: state.IsNode}),
			},
			waitForState: []state.State{state.IsNode},
		},
		"successful wait multi states": {
			waiter: Waiter{
				initialized: true,
				interval:    time.Millisecond,
				newConn:     stubNewConnFunc(noErr),
				newClient:   stubNewClientFunc(&stubPeerStatusClient{state: state.IsNode}),
			},
			waitForState: []state.State{state.IsNode, state.ActivatingNodes},
		},
		"expect timeout": {
			waiter: Waiter{
				initialized: true,
				interval:    time.Millisecond,
				newConn:     stubNewConnFunc(noErr),
				newClient:   stubNewClientFunc(&stubPeerStatusClient{state: state.AcceptingInit}),
			},
			waitForState: []state.State{state.IsNode},
			wantErr:      true,
		},
		"fail to check call": {
			waiter: Waiter{
				initialized: true,
				interval:    time.Millisecond,
				newConn:     stubNewConnFunc(noErr),
				newClient:   stubNewClientFunc(&stubPeerStatusClient{checkErr: someErr}),
			},
			waitForState: []state.State{state.IsNode},
			wantErr:      true,
		},
		"fail to create conn": {
			waiter: Waiter{
				initialized: true,
				interval:    time.Millisecond,
				newConn:     stubNewConnFunc(someErr),
				newClient:   stubNewClientFunc(&stubPeerStatusClient{}),
			},
			waitForState: []state.State{state.IsNode},
			wantErr:      true,
		},
		"fail TLS handshake": {
			waiter: Waiter{
				initialized: true,
				interval:    time.Millisecond,
				newConn:     stubNewConnFunc(handshakeErr),
				newClient:   stubNewClientFunc(&stubPeerStatusClient{state: state.IsNode}),
			},
			waitForState: []state.State{state.IsNode},
			wantErr:      true,
		},
	}

	t.Run("WaitFor", func(t *testing.T) {
		for name, tc := range testCases {
			t.Run(name, func(t *testing.T) {
				assert := assert.New(t)

				ctx := context.Background()
				ctx, cancel := context.WithTimeout(ctx, 50*time.Millisecond)
				defer cancel()

				err := tc.waiter.WaitFor(ctx, "someIP", tc.waitForState...)

				if tc.wantErr {
					assert.Error(err)
				} else {
					assert.NoError(err)
				}
			})
		}
	})

	t.Run("WaitForAll", func(t *testing.T) {
		for name, tc := range testCases {
			t.Run(name, func(t *testing.T) {
				assert := assert.New(t)

				ctx := context.Background()
				ctx, cancel := context.WithTimeout(ctx, 50*time.Millisecond)
				defer cancel()

				endpoints := []string{"192.0.2.1", "192.0.2.2", "192.0.2.3"}
				err := tc.waiter.WaitForAll(ctx, endpoints, tc.waitForState...)

				if tc.wantErr {
					assert.Error(err)
				} else {
					assert.NoError(err)
				}
			})
		}
	})
}

func stubNewConnFunc(errStub error) func(ctx context.Context, target string, opts ...grpc.DialOption) (ClientConn, error) {
	return func(ctx context.Context, target string, opts ...grpc.DialOption) (ClientConn, error) {
		return &stubClientConn{}, errStub
	}
}

type stubClientConn struct{}

func (c *stubClientConn) Invoke(ctx context.Context, method string, args any, reply any, opts ...grpc.CallOption) error {
	return nil
}

func (c *stubClientConn) NewStream(ctx context.Context, desc *grpc.StreamDesc, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) {
	return nil, nil
}

func (c *stubClientConn) Close() error {
	return nil
}

func stubNewClientFunc(stubClient pubproto.APIClient) func(cc grpc.ClientConnInterface) pubproto.APIClient {
	return func(cc grpc.ClientConnInterface) pubproto.APIClient {
		return stubClient
	}
}

type stubPeerStatusClient struct {
	state    state.State
	checkErr error
	pubproto.APIClient
}

func (c *stubPeerStatusClient) GetState(ctx context.Context, in *pubproto.GetStateRequest, opts ...grpc.CallOption) (*pubproto.GetStateResponse, error) {
	resp := &pubproto.GetStateResponse{State: uint32(c.state)}
	return resp, c.checkErr
}

func TestContainsState(t *testing.T) {
	testCases := map[string]struct {
		s       state.State
		states  []state.State
		success bool
	}{
		"is state": {
			s: state.IsNode,
			states: []state.State{
				state.IsNode,
			},
			success: true,
		},
		"is state multi": {
			s: state.AcceptingInit,
			states: []state.State{
				state.AcceptingInit,
				state.ActivatingNodes,
			},
			success: true,
		},
		"is not state": {
			s: state.NodeWaitingForClusterJoin,
			states: []state.State{
				state.AcceptingInit,
			},
		},
		"is not state multi": {
			s: state.NodeWaitingForClusterJoin,
			states: []state.State{
				state.AcceptingInit,
				state.ActivatingNodes,
			},
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)

			res := containsState(tc.s, tc.states...)
			assert.Equal(tc.success, res)
		})
	}
}

func TestIsHandshakeError(t *testing.T) {
	testCases := map[string]struct {
		err          error
		wantedResult bool
	}{
		"TLS handshake error": {
			err:          getGRPCHandshakeError(),
			wantedResult: true,
		},
		"Unavailable error": {
			err:          status.Error(codes.Unavailable, "connection error"),
			wantedResult: false,
		},
		"TLS handshake error with wrong code": {
			err:          status.Error(codes.Aborted, `connection error: desc = "transport: authentication handshake failed`),
			wantedResult: false,
		},
		"Non gRPC error": {
			err:          errors.New("error"),
			wantedResult: false,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)

			res := isGRPCHandshakeError(tc.err)
			assert.Equal(tc.wantedResult, res)
		})
	}
}

func getGRPCHandshakeError() error {
	serverCreds := atlscredentials.New(atls.NewFakeIssuer(oid.Dummy{}), nil)
	api := &fakeAPI{}
	server := grpc.NewServer(grpc.Creds(serverCreds))
	pubproto.RegisterAPIServer(server, api)

	listener := bufconn.Listen(1024)
	defer server.GracefulStop()
	go server.Serve(listener)

	clientCreds := atlscredentials.New(nil, []atls.Validator{failingValidator{oid.Dummy{}}})
	conn, err := grpc.DialContext(context.Background(), "", grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
		return listener.Dial()
	}), grpc.WithTransportCredentials(clientCreds))
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	client := pubproto.NewAPIClient(conn)
	_, err = client.GetState(context.Background(), &pubproto.GetStateRequest{})
	return err
}

type failingValidator struct {
	oid.Getter
}

func (v failingValidator) Validate(attDoc []byte, nonce []byte) ([]byte, error) {
	return nil, errors.New("error")
}

type fakeAPI struct {
	pubproto.UnimplementedAPIServer
}

func (f *fakeAPI) GetState(ctx context.Context, in *pubproto.GetStateRequest) (*pubproto.GetStateResponse, error) {
	return &pubproto.GetStateResponse{State: 1}, nil
}
@ -24,16 +24,6 @@ RUN go install google.golang.org/protobuf/cmd/protoc-gen-go@v${GEN_GO_VER} && \

# Generate code for every existing proto file

## Coordinator pubapi
WORKDIR /pubapi
COPY coordinator/pubapi/pubproto/*.proto /pubapi
RUN protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative *.proto

## Coordinator vpnapi
WORKDIR /vpnapi
COPY coordinator/vpnapi/vpnproto/*.proto /vpnapi
RUN protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative *.proto

## disk-mapper keyservice api
WORKDIR /disk-mapper
COPY state/keyservice/keyproto/*.proto /disk-mapper
@ -57,13 +47,16 @@ RUN protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_o
## verify
WORKDIR /verify
COPY verify/verifyproto/*.proto /verify

## init
WORKDIR /init
COPY coordinator/initproto/*.proto /init
RUN protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative *.proto

FROM scratch as export
COPY --from=build /pubapi/*.go coordinator/pubapi/pubproto/
COPY --from=build /vpnapi/*.go coordinator/vpnapi/vpnproto/
COPY --from=build /disk-mapper/*.go state/keyservice/keyproto/
COPY --from=build /service/*.go debugd/service/
COPY --from=build /kms/*.go kms/kmsproto/
COPY --from=build /activation/*.go activation/activationproto/
COPY --from=build /verify/*.go verify/verifyproto/
COPY --from=build /init/*.go coordinator/initproto/
@ -48,7 +48,7 @@ func main() {
    // set up metadata API and quote issuer for aTLS connections
    var err error
    var diskPath string
    var issuer core.QuoteIssuer
    var issuer keyservice.QuoteIssuer
    var metadata core.ProviderMetadata
    switch strings.ToLower(*csp) {
    case "azure":
@ -12,6 +12,7 @@ import (
    "github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
    "github.com/edgelesssys/constellation/internal/grpc/atlscredentials"
    "github.com/edgelesssys/constellation/internal/logger"
    "github.com/edgelesssys/constellation/internal/oid"
    "github.com/edgelesssys/constellation/state/keyservice/keyproto"
    "go.uber.org/zap"
    "google.golang.org/grpc"
@ -24,8 +25,8 @@ import (
type KeyAPI struct {
    log *logger.Logger
    mux sync.Mutex
    metadata core.ProviderMetadata
    issuer core.QuoteIssuer
    metadata ProviderMetadata
    issuer QuoteIssuer
    key []byte
    keyReceived chan struct{}
    timeout time.Duration
@ -33,7 +34,7 @@ type KeyAPI struct {
}

// New initializes a KeyAPI with the given parameters.
func New(log *logger.Logger, issuer core.QuoteIssuer, metadata core.ProviderMetadata, timeout time.Duration) *KeyAPI {
func New(log *logger.Logger, issuer QuoteIssuer, metadata core.ProviderMetadata, timeout time.Duration) *KeyAPI {
    return &KeyAPI{
        log:      log,
        metadata: metadata,
@ -136,3 +137,17 @@ func (a *KeyAPI) requestKey(uuid string, credentials credentials.TransportCreden
        cancel()
    }
}

// QuoteValidator validates quotes.
type QuoteValidator interface {
    oid.Getter
    // Validate validates a quote and returns the user data on success.
    Validate(attDoc []byte, nonce []byte) ([]byte, error)
}

// QuoteIssuer issues quotes.
type QuoteIssuer interface {
    oid.Getter
    // Issue issues a quote for remote attestation for a given message
    Issue(userData []byte, nonce []byte) (quote []byte, err error)
}
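
These two interfaces are small enough that a test double fits in a few lines. A minimal sketch (fakeQuote and its echo behavior are illustrative assumptions, not code from this commit; oid.Dummy supplies the oid.Getter part, as in the handshake test above):

// fakeQuote is a hypothetical no-op issuer/validator pair for unit tests:
// Issue appends the nonce to the user data, Validate strips it off again.
type fakeQuote struct {
    oid.Dummy
}

func (fakeQuote) Issue(userData []byte, nonce []byte) ([]byte, error) {
    return append(append([]byte{}, userData...), nonce...), nil
}

func (fakeQuote) Validate(attDoc []byte, nonce []byte) ([]byte, error) {
    if len(attDoc) < len(nonce) {
        return nil, errors.New("malformed attestation document")
    }
    return attDoc[:len(attDoc)-len(nonce)], nil
}
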
@ -1,588 +0,0 @@
//go:build integration

package integration

import (
    "context"
    "errors"
    "fmt"
    "io"
    "net"
    "os"
    "path/filepath"
    "strings"
    "testing"
    "time"

    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/container"
    "github.com/docker/docker/api/types/strslice"
    "github.com/docker/docker/client"
    "github.com/docker/docker/pkg/archive"
    "github.com/docker/go-connections/nat"
    "github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
    "github.com/edgelesssys/constellation/coordinator/role"
    "github.com/edgelesssys/constellation/coordinator/store"
    "github.com/edgelesssys/constellation/coordinator/storewrapper"
    "github.com/edgelesssys/constellation/internal/atls"
    "github.com/edgelesssys/constellation/internal/grpc/atlscredentials"
    "github.com/edgelesssys/constellation/internal/oid"
    kms "github.com/edgelesssys/constellation/kms/setup"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "go.uber.org/goleak"
    "go.uber.org/zap"
    "golang.zx2c4.com/wireguard/wgctrl/wgtypes"
    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials"
)

/*
Notes regarding the integration test implementation:

Scale:
'numberPeers' should be < 30, otherwise activation might get stuck, because the Docker network
does not scale well (more than ~50 WireGuard kernel interfaces may be the reason).
With over 150 nodes, node activation fails due to Docker-internal network naming issues.
This could be extended further, but the current number of possible nodes is enough for this test.

Usage of the Docker library:
The API calls are sometimes slower than the equivalent 'sh docker ...' commands; this is
specifically the case for termination. However, to keep the code clean, we accept this
tradeoff and use the library functions.
*/

const (
    publicgRPCPort            = "9000"
    constellationImageName    = "constellation:latest"
    etcdImageName             = "bitnami/etcd:3.5.2"
    etcdOverlayNetwork        = "constellationIntegrationTest"
    masterSecret              = "ConstellationIntegrationTest"
    localLogDirectory         = "/tmp/coordinator/logs"
    numberFirstActivation     = 3
    numberSecondaryActivation = 3
    numberThirdActivation     = 3
)

var (
    hostconfigConstellationPeer = &container.HostConfig{
        Binds:      []string{"/dev/net/tun:/dev/net/tun"}, // necessary for wireguard interface creation
        CapAdd:     strslice.StrSlice{"NET_ADMIN"},        // necessary for wireguard interface creation
        AutoRemove: true,
    }
    configConstellationPeer = &container.Config{
        Image:        constellationImageName,
        AttachStdout: true, // necessary to attach to the container log
        AttachStderr: true, // necessary to attach to the container log
        Tty:          true, // necessary to attach to the container log
    }

    hostconfigEtcd = &container.HostConfig{
        AutoRemove: true,
    }
    configEtcd = &container.Config{
        Image: etcdImageName,
        Env: []string{
            "ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379",
            "ETCD_ADVERTISE_CLIENT_URLS=http://127.0.0.1:2379",
            "ETCD_LOG_LEVEL=debug",
            "ETCD_DATA_DIR=/bitnami/etcd/data",
        },
        Entrypoint:   []string{"/opt/bitnami/etcd/bin/etcd"},
        AttachStdout: true,
        AttachStderr: true,
        Tty:          true,
    }

    constellationDockerImageBuildOptions = types.ImageBuildOptions{
        Dockerfile:     "test/Dockerfile",
        Tags:           []string{constellationImageName},
        Remove:         true,
        ForceRemove:    true,
        SuppressOutput: false,
        PullParent:     true,
    }
    containerLogConfig = types.ContainerLogsOptions{
        ShowStdout: true,
        Follow:     true,
    }

    wgExecConfig = types.ExecConfig{
        Cmd:          []string{"wg"},
        AttachStdout: true,
        AttachStderr: true,
    }
    pingExecConfig = types.ExecConfig{
        AttachStdout: true,
        AttachStderr: true,
    }
    activeCoordinators []string
    coordinatorCounter int
    nodeCounter        int
)

type peerInfo struct {
    dockerData    container.ContainerCreateCreatedBody
    isCoordinator bool
    vpnIP         string
}

func TestMain(t *testing.T) {
    assert := assert.New(t)
    require := require.New(t)

    activePeers := make(map[string]peerInfo)

    defer goleak.VerifyNone(t,
        // https://github.com/census-instrumentation/opencensus-go/issues/1262
        goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"),
    )

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    cwd, err := os.Getwd()
    require.NoError(err)
    require.NoError(os.Chdir(filepath.Join(cwd, "..")))
    require.NoError(createTempDir())
    // setup Docker containers
    cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
    require.NoError(err)
    defer cli.Close()

    versionInfo, err := cli.Info(ctx)
    require.NoError(err)
    t.Logf("start integration test, local docker version %v", versionInfo.ServerVersion)

    require.NoError(imageBuild(ctx, cli))
    defer cli.ImageRemove(ctx, constellationImageName, types.ImageRemoveOptions{Force: true, PruneChildren: true})

    reader, err := cli.ImagePull(ctx, etcdImageName, types.ImagePullOptions{})
    require.NoError(err)
    _, err = io.Copy(os.Stdout, reader)
    require.NoError(err)
    require.NoError(reader.Close())

    // Add another docker network to be able to resolve etcd-storage from the coordinator.
    // This is not possible in the default "bridge" network.
    dockerNetwork, err := cli.NetworkCreate(ctx, etcdOverlayNetwork, types.NetworkCreate{Driver: "bridge", Internal: true})
    require.NoError(err)
    defer cli.NetworkRemove(ctx, etcdOverlayNetwork)

    // setup etcd
    t.Log("create etcd container...")
    respEtcd, err := cli.ContainerCreate(ctx, configEtcd, hostconfigEtcd, nil, nil, "etcd-storage")
    require.NoError(err)
    require.NoError(cli.ContainerStart(ctx, respEtcd.ID, types.ContainerStartOptions{}))
    defer killDockerContainer(ctx, cli, respEtcd)
    require.NoError(cli.NetworkConnect(ctx, dockerNetwork.ID, respEtcd.ID, nil))
    etcdData, err := cli.ContainerInspect(ctx, respEtcd.ID)
    require.NoError(err)
    etcdIPAddr := etcdData.NetworkSettings.DefaultNetworkSettings.IPAddress
    etcdstore, err := store.NewEtcdStore(net.JoinHostPort(etcdIPAddr, "2379"), false, zap.NewNop())
    require.NoError(err)
    defer etcdstore.Close()

    defer killDockerContainers(ctx, cli, activePeers)
    // setup coordinator containers
    t.Log("create 1st coordinator container...")
    require.NoError(createCoordinatorContainer(ctx, cli, "master-1", dockerNetwork.ID, activePeers))
    t.Log("create 2nd coordinator container...")
    require.NoError(createCoordinatorContainer(ctx, cli, "master-2", dockerNetwork.ID, activePeers))
    // 1st activation phase
    ips, err := spawnContainers(ctx, cli, numberFirstActivation, activePeers)
    require.NoError(err)

    t.Logf("node ips: %v", ips)
    t.Log("activate coordinator...")
    start := time.Now()
    assert.NoError(startCoordinator(ctx, activeCoordinators[0], ips))
    elapsed := time.Since(start)
    t.Logf("activation took %v", elapsed)
    // activate additional coordinator
    require.NoError(addNewCoordinatorToCoordinator(ctx, activeCoordinators[1], activeCoordinators[0]))
    require.NoError(updateVPNIPs(activePeers, etcdstore))

    t.Log("count peers in instances")
    countPeersTest(ctx, t, cli, wgExecConfig, activePeers)
    t.Log("start ping test")
    pingTest(ctx, t, cli, pingExecConfig, activePeers, etcdstore)

    // 2nd activation phase
    ips, err = spawnContainers(ctx, cli, numberSecondaryActivation, activePeers)
    require.NoError(err)
    t.Logf("node ips: %v", ips)
    t.Log("add additional nodes")
    start = time.Now()
    assert.NoError(addNewNodesToCoordinator(ctx, activeCoordinators[1], ips))
    elapsed = time.Since(start)
    t.Logf("adding took %v", elapsed)
    require.NoError(updateVPNIPs(activePeers, etcdstore))

    t.Log("count peers in instances")
    countPeersTest(ctx, t, cli, wgExecConfig, activePeers)
    t.Log("start ping test")
    pingTest(ctx, t, cli, pingExecConfig, activePeers, etcdstore)

    // ----------------------------------------------------------------
    t.Log("create 3rd coordinator container...")
    require.NoError(createCoordinatorContainer(ctx, cli, "master-3", dockerNetwork.ID, activePeers))
    // activate additional coordinator
    require.NoError(addNewCoordinatorToCoordinator(ctx, activeCoordinators[2], activeCoordinators[1]))
    require.NoError(updateVPNIPs(activePeers, etcdstore))

    // 3rd activation phase
    ips, err = spawnContainers(ctx, cli, numberThirdActivation, activePeers)
    require.NoError(err)
    t.Logf("node ips: %v", ips)
    t.Log("add additional nodes")
    start = time.Now()
    assert.NoError(addNewNodesToCoordinator(ctx, activeCoordinators[2], ips))
    elapsed = time.Since(start)
    t.Logf("adding took %v", elapsed)
    require.NoError(updateVPNIPs(activePeers, etcdstore))

    t.Log("count peers in instances")
    countPeersTest(ctx, t, cli, wgExecConfig, activePeers)
    t.Log("start ping test")
    pingTest(ctx, t, cli, pingExecConfig, activePeers, etcdstore)
}

// helper methods
func startCoordinator(ctx context.Context, coordinatorAddr string, ips []string) error {
    creds := atlscredentials.New(nil, atls.NewFakeValidators(oid.Dummy{}))

    conn, err := grpc.DialContext(ctx, net.JoinHostPort(coordinatorAddr, publicgRPCPort), grpc.WithTransportCredentials(creds))
    if err != nil {
        return err
    }
    defer conn.Close()

    client := pubproto.NewAPIClient(conn)
    adminKey, err := wgtypes.GeneratePrivateKey()
    if err != nil {
        return err
    }
    adminKey = adminKey.PublicKey()

    stream, err := client.ActivateAsCoordinator(ctx, &pubproto.ActivateAsCoordinatorRequest{
        AdminVpnPubKey:     adminKey[:],
        NodePublicIps:      ips,
        MasterSecret:       []byte(masterSecret),
        KmsUri:             kms.ClusterKMSURI,
        StorageUri:         kms.NoStoreURI,
        KeyEncryptionKeyId: "",
        UseExistingKek:     false,
    })
    if err != nil {
        return err
    }

    for {
        _, err := stream.Recv()
        if err == io.EOF {
            return nil
        }
        if err != nil {
            return err
        }
    }
}

func createTempDir() error {
    if err := os.RemoveAll(localLogDirectory); err != nil && !os.IsNotExist(err) {
        return err
    }
    return os.MkdirAll(localLogDirectory, 0o755)
}

func addNewCoordinatorToCoordinator(ctx context.Context, newCoordinatorAddr, oldCoordinatorAddr string) error {
    creds := atlscredentials.New(nil, atls.NewFakeValidators(oid.Dummy{}))

    conn, err := grpc.DialContext(ctx, net.JoinHostPort(oldCoordinatorAddr, publicgRPCPort), grpc.WithTransportCredentials(creds))
    if err != nil {
        return err
    }
    defer conn.Close()

    client := pubproto.NewAPIClient(conn)

    _, err = client.ActivateAdditionalCoordinator(ctx, &pubproto.ActivateAdditionalCoordinatorRequest{
        CoordinatorPublicIp: newCoordinatorAddr,
    })
    if err != nil {
        return err
    }
    return nil
}

func addNewNodesToCoordinator(ctx context.Context, coordinatorAddr string, ips []string) error {
    creds := atlscredentials.New(nil, atls.NewFakeValidators(oid.Dummy{}))

    conn, err := grpc.DialContext(ctx, net.JoinHostPort(coordinatorAddr, publicgRPCPort), grpc.WithTransportCredentials(creds))
    if err != nil {
        return err
    }
    defer conn.Close()

    client := pubproto.NewAPIClient(conn)

    stream, err := client.ActivateAdditionalNodes(ctx, &pubproto.ActivateAdditionalNodesRequest{NodePublicIps: ips})
    if err != nil {
        return err
    }
    for {
        _, err := stream.Recv()
        if err == io.EOF {
            return nil
        }
        if err != nil {
            return err
        }
    }
}
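
Both startCoordinator and addNewNodesToCoordinator end in the same receive loop. With Go 1.18 generics this could be factored out; a sketch under that assumption (drainStream is hypothetical, not part of this commit):

// drainStream consumes a server stream until io.EOF, discarding the messages.
func drainStream[T any](recv func() (T, error)) error {
    for {
        if _, err := recv(); err != nil {
            if errors.Is(err, io.EOF) {
                return nil // server finished the stream cleanly
            }
            return err
        }
    }
}

Both loops above would then reduce to `return drainStream(stream.Recv)`.
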
func spawnContainers(ctx context.Context, cli *client.Client, count int, activeContainers map[string]peerInfo) ([]string, error) {
    tmpPeerIPs := make([]string, 0, count)
    // spawn client container(s) and obtain their docker network ip address
    for i := 0; i < count; i++ {
        resp, err := createNewNode(ctx, cli)
        if err != nil {
            return nil, err
        }
        attachDockerContainerStdoutStderrToFile(ctx, cli, resp.containerResponse.ID, role.Node)
        tmpPeerIPs = append(tmpPeerIPs, resp.dockerIPAddr)
        containerData, err := cli.ContainerInspect(ctx, resp.containerResponse.ID)
        if err != nil {
            return nil, err
        }
        activeContainers[containerData.NetworkSettings.DefaultNetworkSettings.IPAddress] = peerInfo{dockerData: resp.containerResponse, isCoordinator: false}
    }
    return tmpPeerIPs, blockUntilUp(ctx, tmpPeerIPs)
}

func createCoordinatorContainer(ctx context.Context, cli *client.Client, name, dockerNetworkID string, activePeers map[string]peerInfo) error {
    resp, err := cli.ContainerCreate(ctx, configConstellationPeer, hostconfigConstellationPeer, nil, nil, name)
    if err != nil {
        return err
    }
    err = cli.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{})
    if err != nil {
        return err
    }
    attachDockerContainerStdoutStderrToFile(ctx, cli, resp.ID, role.Coordinator)
    coordinatorData, err := cli.ContainerInspect(ctx, resp.ID)
    if err != nil {
        return err
    }
    activePeers[coordinatorData.NetworkSettings.DefaultNetworkSettings.IPAddress] = peerInfo{dockerData: resp, isCoordinator: true}
    activeCoordinators = append(activeCoordinators, coordinatorData.NetworkSettings.DefaultNetworkSettings.IPAddress)
    return cli.NetworkConnect(ctx, dockerNetworkID, resp.ID, nil)
}

// Create the port-forward binding so we can access the coordinator from the host.
func makeBinding(ip, internalPort, externalPort string) nat.PortMap {
    binding := nat.PortBinding{
        HostIP:   ip,
        HostPort: externalPort,
    }
    bindingMap := map[nat.Port][]nat.PortBinding{nat.Port(fmt.Sprintf("%s/tcp", internalPort)): {binding}}
    return nat.PortMap(bindingMap)
}
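
makeBinding plugs straight into a container.HostConfig. A hypothetical usage (not part of this commit) that would expose a coordinator's public gRPC port on host port 9000:

hostConfig := &container.HostConfig{
    Binds:        []string{"/dev/net/tun:/dev/net/tun"},
    CapAdd:       strslice.StrSlice{"NET_ADMIN"},
    AutoRemove:   true,
    PortBindings: makeBinding("127.0.0.1", publicgRPCPort, "9000"), // host:9000 -> container:9000/tcp
}
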
func killDockerContainers(ctx context.Context, cli *client.Client, activeContainers map[string]peerInfo) {
    for _, v := range activeContainers {
        killDockerContainer(ctx, cli, v.dockerData)
    }
}

func killDockerContainer(ctx context.Context, cli *client.Client, container container.ContainerCreateCreatedBody) {
    fmt.Print("Kill container ", container.ID[:10], "... ")
    if err := cli.ContainerKill(ctx, container.ID, "9"); err != nil {
        fmt.Println(err)
        return
    }
    fmt.Println("Success")
}

func attachDockerContainerStdoutStderrToFile(ctx context.Context, cli *client.Client, id string, peerRole role.Role) {
    resp, err := cli.ContainerLogs(ctx, id, containerLogConfig)
    if err != nil {
        panic(err)
    }
    var file *os.File
    switch peerRole {
    case role.Node:
        file, err = os.Create(fmt.Sprintf("%s/node-%d", localLogDirectory, nodeCounter))
        nodeCounter++
    case role.Coordinator:
        file, err = os.Create(fmt.Sprintf("%s/coordinator-%d", localLogDirectory, coordinatorCounter))
        coordinatorCounter++
    default:
        panic("invalid role")
    }
    if err != nil {
        panic(err)
    }
    go io.Copy(file, resp) // TODO: this goroutine leaks
}
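
One way to close the TODO above (a sketch, not part of this commit): tie the log stream's lifetime to the test context so the copy goroutine exits when the test ends.

// resp from cli.ContainerLogs is an io.ReadCloser; closing it unblocks io.Copy.
go func() {
    <-ctx.Done()
    resp.Close()
}()
go io.Copy(file, resp)
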
func imageBuild(ctx context.Context, dockerClient *client.Client) error {
    // Docker needs a build context; generate it...
    tar, err := archive.TarWithOptions(".", &archive.TarOptions{})
    if err != nil {
        return err
    }

    resp, err := dockerClient.ImageBuild(ctx, tar, constellationDockerImageBuildOptions)
    if err != nil {
        return err
    }
    defer resp.Body.Close()
    if _, err := io.Copy(os.Stdout, resp.Body); err != nil {
        return err
    }
    // Block until EOF, so the build has finished when we continue
    _, err = io.Copy(io.Discard, resp.Body)

    return err
}

// count number of wireguard peers within all active docker containers
func countPeersTest(ctx context.Context, t *testing.T, cli *client.Client, execConfig types.ExecConfig, activeContainers map[string]peerInfo) {
    t.Run("countPeerTest", func(t *testing.T) {
        assert := assert.New(t)
        require := require.New(t)
        for ip, id := range activeContainers {
            respIDExecCreate, err := cli.ContainerExecCreate(ctx, id.dockerData.ID, execConfig)
            require.NoError(err)
            respID, err := cli.ContainerExecAttach(ctx, respIDExecCreate.ID, types.ExecStartCheck{})
            require.NoError(err)
            output, err := io.ReadAll(respID.Reader)
            require.NoError(err)
            respID.Close()
            countedPeers := strings.Count(string(output), "peer")
            fmt.Printf("% 3d peers in container %s [%s] out of % 3d total nodes \n", countedPeers, id.dockerData.ID, ip, len(activeContainers))

            assert.Equal(len(activeContainers), countedPeers)
        }
    })
}

func pingTest(ctx context.Context, t *testing.T, cli *client.Client, execConfig types.ExecConfig, activeContainers map[string]peerInfo, etcdstore store.Store) {
    t.Run("pingTest", func(t *testing.T) {
        assert := assert.New(t)
        require := require.New(t)
        peerVPNIPsWithoutAdmins, err := getPeerVPNIPsFromEtcd(etcdstore)
        require.NoError(err)
        // all nodes + coordinators are peers
        require.Equal(len(peerVPNIPsWithoutAdmins), len(activeContainers))

        for i := 0; i < len(peerVPNIPsWithoutAdmins); i++ {
            execConfig.Cmd = []string{"ping", "-q", "-c", "1", "-W", "1", peerVPNIPsWithoutAdmins[i]}
            for _, id := range activeContainers {
                fmt.Printf("Ping from container %v | % 19s to container % 19s", id.dockerData.ID, id.vpnIP, peerVPNIPsWithoutAdmins[i])

                respIDExecCreate, err := cli.ContainerExecCreate(ctx, id.dockerData.ID, execConfig)
                require.NoError(err)

                err = cli.ContainerExecStart(ctx, respIDExecCreate.ID, types.ExecStartCheck{})
                require.NoError(err)

                resp, err := cli.ContainerExecInspect(ctx, respIDExecCreate.ID)
                require.NoError(err)
                assert.Equal(0, resp.ExitCode)
                if resp.ExitCode == 0 {
                    fmt.Printf(" ...Success\n")
                } else {
                    fmt.Printf(" ...Failure\n")
                }
            }
        }
    })
}

type newNodeData struct {
    containerResponse container.ContainerCreateCreatedBody
    dockerIPAddr      string
}

// createNewNode creates and starts a node container; errors are passed up to the caller.
func createNewNode(ctx context.Context, cli *client.Client) (*newNodeData, error) {
    resp, err := cli.ContainerCreate(ctx, configConstellationPeer, hostconfigConstellationPeer, nil, nil, "")
    if err != nil {
        return nil, err
    }

    if err := cli.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}); err != nil {
        return nil, err
    }
    containerData, err := cli.ContainerInspect(ctx, resp.ID)
    if err != nil {
        return nil, err
    }
    fmt.Printf("created Node %v\n", containerData.ID)
    return &newNodeData{resp, containerData.NetworkSettings.IPAddress}, nil
}

func awaitPeerResponse(ctx context.Context, ip string, credentials credentials.TransportCredentials) error {
    // Block, so the connection gets established/fails immediately
    ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
    defer cancel()
    conn, err := grpc.DialContext(ctx, net.JoinHostPort(ip, publicgRPCPort), grpc.WithBlock(), grpc.WithTransportCredentials(credentials))
    if err != nil {
        return err
    }
    return conn.Close()
}

func blockUntilUp(ctx context.Context, peerIPs []string) error {
    creds := atlscredentials.New(nil, atls.NewFakeValidators(oid.Dummy{}))
    for _, ip := range peerIPs {
        // Block, so the connection gets established/fails immediately
        if err := awaitPeerResponse(ctx, ip, creds); err != nil {
            return err
        }
    }
    return nil
}

func getPeerVPNIPsFromEtcd(etcdstore store.Store) ([]string, error) {
    peers, err := storewrapper.StoreWrapper{Store: etcdstore}.GetPeers()
    if err != nil {
        return nil, err
    }

    vpnIPS := make([]string, 0, len(peers))

    for _, peer := range peers {
        if peer.Role != role.Admin {
            vpnIPS = append(vpnIPS, peer.VPNIP)
        }
    }
    return vpnIPS, nil
}

func translatePublicToVPNIP(publicIP string, etcdstore store.Store) (string, error) {
    peers, err := storewrapper.StoreWrapper{Store: etcdstore}.GetPeers()
    if err != nil {
        return "", err
    }
    for _, peer := range peers {
        if peer.PublicIP == publicIP {
            return peer.VPNIP, nil
        }
    }
    return "", errors.New("did not find VPN IP")
}

func updateVPNIPs(activeContainers map[string]peerInfo, etcdstore store.Store) error {
    for publicIP, v := range activeContainers {
        vpnIP, err := translatePublicToVPNIP(publicIP, etcdstore)
        if err != nil {
            return err
        }
        v.vpnIP = vpnIP
        activeContainers[publicIP] = v
    }
    return nil
}