Mirror of https://github.com/edgelesssys/constellation.git (synced 2025-03-20 05:56:19 -04:00)

Commit 3ce3978063 (parent 1b6ecf27ee)

    update state disk passphrase on activation

    Signed-off-by: Malte Poll <mp@edgeless.systems>
@@ -115,9 +115,17 @@ func TestConcurrent(t *testing.T) {
         _ = activateCoordinator(require, dialer, coordinatorIP, bindPort, nodeIPs)
     }
 
-    actNode := func(papi *pubapi.API) {
+    actNode := func(target string) {
         defer wg.Done()
-        _, err := papi.ActivateAsNode(context.Background(), &pubproto.ActivateAsNodeRequest{})
+        conn, _ := dialGRPC(context.Background(), dialer, target)
+        defer conn.Close()
+        client := pubproto.NewAPIClient(conn)
+        stream, err := client.ActivateAsNode(context.Background())
+        assert.NoError(err)
+        assert.NoError(stream.Send(&pubproto.ActivateAsNodeRequest{
+            Request: &pubproto.ActivateAsNodeRequest_InitialRequest{},
+        }))
+        _, err = stream.Recv()
         assert.Error(err)
     }
 
@@ -165,12 +173,12 @@ func TestConcurrent(t *testing.T) {
     wg.Add(26)
     go actCoord()
     go actCoord()
-    go actNode(coordPAPI)
-    go actNode(coordPAPI)
-    go actNode(nodePAPI1)
-    go actNode(nodePAPI1)
-    go actNode(nodePAPI2)
-    go actNode(nodePAPI2)
+    go actNode(net.JoinHostPort(coordinatorIP, bindPort))
+    go actNode(net.JoinHostPort(coordinatorIP, bindPort))
+    go actNode(net.JoinHostPort(nodeIPs[0], bindPort))
+    go actNode(net.JoinHostPort(nodeIPs[0], bindPort))
+    go actNode(net.JoinHostPort(nodeIPs[1], bindPort))
+    go actNode(net.JoinHostPort(nodeIPs[1], bindPort))
     go updNode(coordPAPI, false)
     go updNode(coordPAPI, false)
     go updNode(nodePAPI1, true)
@@ -97,20 +97,6 @@ func TestLegacyActivateCoordinator(t *testing.T) {
     // Coordinator cannot be activated a second time
     assert.Error(coordinatorAPI.ActivateAsCoordinator(activationReq, testActivationSvr))
 
-    // Node cannot be activated a second time
-    nodeResp, err := nodeAPI3.ActivateAsNode(context.TODO(), &pubproto.ActivateAsNodeRequest{
-        NodeVpnIp: "192.0.2.1:9004",
-        Peers: []*pubproto.Peer{{
-            VpnPubKey: coordinatorKey,
-            PublicIp:  coordinatorIP,
-            VpnIp:     "10.118.0.1",
-        }},
-        OwnerId:   []byte("ownerID"),
-        ClusterId: []byte("clusterID"),
-    })
-    assert.Error(err)
-    assert.Nil(nodeResp)
-
     // Assert Coordinator
     peers := coordinatorCore.vpn.(*stubVPN).peers
     assert.Less(3, len(peers))
@@ -117,6 +117,18 @@ func (a *API) ActivateAsCoordinator(in *pubproto.ActivateAsCoordinatorRequest, s
     if err := a.core.PersistNodeState(role.Coordinator, ownerID, clusterID); err != nil {
         return status.Errorf(codes.Internal, "persist node state: %v", err)
     }
+    diskUUID, err := a.core.GetDiskUUID()
+    if err != nil {
+        return status.Errorf(codes.Internal, "getting disk uuid: %v", err)
+    }
+    diskKey, err := a.core.GetDataKey(ctx, diskUUID, 32)
+    if err != nil {
+        return status.Errorf(codes.Internal, "getting disk key: %v", err)
+    }
+    if err := a.core.UpdateDiskPassphrase(string(diskKey)); err != nil {
+        return status.Errorf(codes.Internal, "updating disk key: %v", err)
+    }
+
     adminVPNIP, err := a.core.GetNextNodeIP()
     if err != nil {
         return status.Errorf(codes.Internal, "requesting node IP address: %v", err)
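With this hunk, the Coordinator rotates its own state disk passphrase during activation: it reads the disk's UUID, derives a 32-byte data key for that UUID via GetDataKey, and installs the derived key as the new passphrase. The diff only shows the GetDataKey call site, not how the key is derived; as a rough sketch only, an HKDF-style derivation that binds the key to a master secret and the disk UUID could look like the following (the kms type, masterKey field, and info label are hypothetical, not taken from this commit):

    // Sketch of a UUID-bound key derivation, assuming an HKDF construction.
    // Everything here is illustrative; the commit does not show GetDataKey's internals.
    package main

    import (
        "crypto/sha256"
        "io"

        "golang.org/x/crypto/hkdf"
    )

    type kms struct {
        masterKey []byte // hypothetical cluster-wide secret
    }

    // GetDataKey derives `length` bytes bound to diskUUID, so distinct disks
    // derive distinct keys from the same master secret.
    func (k *kms) GetDataKey(diskUUID string, length int) ([]byte, error) {
        kdf := hkdf.New(sha256.New, k.masterKey, nil, []byte("disk-key/"+diskUUID))
        key := make([]byte, length)
        if _, err := io.ReadFull(kdf, key); err != nil {
            return nil, err
        }
        return key, nil
    }

Note that the key is handed to UpdateDiskPassphrase as string(diskKey), i.e. the passphrase is the raw 32 key bytes rather than a printable encoding.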
@@ -273,18 +285,78 @@ func (a *API) activateNode(nodePublicIP string, nodeVPNIP string, initialPeers [
 
     client := pubproto.NewAPIClient(conn)
 
-    resp, err := client.ActivateAsNode(ctx, &pubproto.ActivateAsNodeRequest{
-        NodeVpnIp: nodeVPNIP,
-        Peers:     initialPeers,
-        OwnerId:   ownerID,
-        ClusterId: clusterID,
-    })
+    stream, err := client.ActivateAsNode(ctx)
     if err != nil {
-        a.logger.Error("node activation failed", zap.Error(err))
+        a.logger.Error("connecting to node for activation failed", zap.Error(err))
         return nil, err
     }
 
-    return resp.NodeVpnPubKey, nil
+    /*
+        coordinator -> initial request -> node
+    */
+    if err := stream.Send(&pubproto.ActivateAsNodeRequest{
+        Request: &pubproto.ActivateAsNodeRequest_InitialRequest{
+            InitialRequest: &pubproto.ActivateAsNodeInitialRequest{
+                NodeVpnIp: nodeVPNIP,
+                Peers:     initialPeers,
+                OwnerId:   ownerID,
+                ClusterId: clusterID,
+            },
+        },
+    }); err != nil {
+        a.logger.Error("sending initial message to node for activation failed", zap.Error(err))
+        return nil, err
+    }
+
+    /*
+        coordinator <- state disk uuid <- node
+    */
+    // wait for message containing the nodes disk UUID to send back the permanent encryption key
+    message, err := stream.Recv()
+    if err != nil {
+        a.logger.Error("expected disk UUID message but no message received", zap.Error(err))
+        return nil, err
+    }
+    diskUUID, ok := message.GetResponse().(*pubproto.ActivateAsNodeResponse_StateDiskUuid)
+    if !ok {
+        a.logger.Error("expected disk UUID message but got different message")
+        return nil, errors.New("expected state disk UUID but got different message type")
+    }
+    diskKey, err := a.core.GetDataKey(ctx, diskUUID.StateDiskUuid, 32)
+    if err != nil {
+        a.logger.Error("failed to derive node's disk key")
+        return nil, err
+    }
+
+    /*
+        coordinator -> state disk key -> node
+    */
+    // send back state disk encryption key
+    if err := stream.Send(&pubproto.ActivateAsNodeRequest{
+        Request: &pubproto.ActivateAsNodeRequest_StateDiskKey{
+            StateDiskKey: diskKey,
+        },
+    }); err != nil {
+        a.logger.Error("sending state disk key to node on activation failed", zap.Error(err))
+        return nil, err
+    }
+
+    /*
+        coordinator <- VPN public key <- node
+    */
+    // wait for message containing the node VPN pubkey
+    message, err = stream.Recv()
+    if err != nil {
+        a.logger.Error("expected node VPN pubkey but no message received", zap.Error(err))
+        return nil, err
+    }
+    vpnPubKey, ok := message.GetResponse().(*pubproto.ActivateAsNodeResponse_NodeVpnPubKey)
+    if !ok {
+        a.logger.Error("expected node VPN pubkey but got different message")
+        return nil, errors.New("expected node VPN pub key but got different message type")
+    }
+
+    return vpnPubKey.NodeVpnPubKey, nil
 }
 
 // assemblePeerStruct combines all information of this peer into a peer struct.
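The coordinator side of this exchange repeats one pattern at every step: call Recv, then type-assert the oneof wrapper to the expected branch. A generic helper could factor that out; the following is only an illustrative sketch (recvResponseAs is hypothetical and assumes Go 1.18+ generics), not code from this commit:

    // recvResponseAs receives the next response from the stream and asserts
    // its oneof wrapper to the expected concrete type T. Illustrative only.
    func recvResponseAs[T any](stream pubproto.API_ActivateAsNodeClient) (T, error) {
        var zero T
        message, err := stream.Recv()
        if err != nil {
            return zero, err
        }
        payload, ok := message.GetResponse().(T)
        if !ok {
            return zero, fmt.Errorf("expected %T, got %T", zero, message.GetResponse())
        }
        return payload, nil
    }

    // Usage, mirroring the two Recv sites above:
    //   uuid, err := recvResponseAs[*pubproto.ActivateAsNodeResponse_StateDiskUuid](stream)
    //   pubKey, err := recvResponseAs[*pubproto.ActivateAsNodeResponse_NodeVpnPubKey](stream)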
@@ -3,6 +3,7 @@ package pubapi
 import (
     "context"
     "errors"
+    "io"
     "net"
     "sync"
     "testing"
@@ -25,9 +26,9 @@ import (
 func TestActivateAsCoordinator(t *testing.T) {
     someErr := errors.New("failed")
     coordinatorPubKey := []byte{6, 7, 8}
-    testNode1 := &stubPeer{peer: peer.Peer{PublicIP: "192.0.2.11", VPNPubKey: []byte{1, 2, 3}}}
-    testNode2 := &stubPeer{peer: peer.Peer{PublicIP: "192.0.2.12", VPNPubKey: []byte{2, 3, 4}}}
-    testNode3 := &stubPeer{peer: peer.Peer{PublicIP: "192.0.2.13", VPNPubKey: []byte{3, 4, 5}}}
+    testNode1 := newStubPeer("192.0.2.11", []byte{1, 2, 3})
+    testNode2 := newStubPeer("192.0.2.12", []byte{2, 3, 4})
+    testNode3 := newStubPeer("192.0.2.13", []byte{3, 4, 5})
     expectedNode1 := peer.Peer{PublicIP: "192.0.2.11", VPNIP: "10.118.0.11", VPNPubKey: []byte{1, 2, 3}, Role: role.Node}
     expectedNode2 := peer.Peer{PublicIP: "192.0.2.12", VPNIP: "10.118.0.12", VPNPubKey: []byte{2, 3, 4}, Role: role.Node}
     expectedNode3 := peer.Peer{PublicIP: "192.0.2.13", VPNIP: "10.118.0.13", VPNPubKey: []byte{3, 4, 5}, Role: role.Node}
@@ -192,9 +193,9 @@ func TestActivateAsCoordinator(t *testing.T) {
 
 func TestActivateAdditionalNodes(t *testing.T) {
     someErr := errors.New("failed")
-    testNode1 := &stubPeer{peer: peer.Peer{PublicIP: "192.0.2.11", VPNPubKey: []byte{1, 2, 3}}}
-    testNode2 := &stubPeer{peer: peer.Peer{PublicIP: "192.0.2.12", VPNPubKey: []byte{2, 3, 4}}}
-    testNode3 := &stubPeer{peer: peer.Peer{PublicIP: "192.0.2.13", VPNPubKey: []byte{3, 4, 5}}}
+    testNode1 := newStubPeer("192.0.2.11", []byte{1, 2, 3})
+    testNode2 := newStubPeer("192.0.2.12", []byte{2, 3, 4})
+    testNode3 := newStubPeer("192.0.2.13", []byte{3, 4, 5})
     expectedNode1 := peer.Peer{PublicIP: "192.0.2.11", VPNIP: "10.118.0.11", VPNPubKey: []byte{1, 2, 3}, Role: role.Node}
     expectedNode2 := peer.Peer{PublicIP: "192.0.2.12", VPNIP: "10.118.0.12", VPNPubKey: []byte{2, 3, 4}, Role: role.Node}
     expectedNode3 := peer.Peer{PublicIP: "192.0.2.13", VPNIP: "10.118.0.13", VPNPubKey: []byte{3, 4, 5}, Role: role.Node}
@@ -323,14 +324,43 @@ func TestAssemblePeerStruct(t *testing.T) {
 }
 
 type stubPeer struct {
-    peer        peer.Peer
-    activateErr error
-    joinErr     error
+    peer                   peer.Peer
+    activateAsNodeMessages []*pubproto.ActivateAsNodeResponse
+    activateAsNodeReceive  int
+    activateErr            error
+    joinErr                error
     pubproto.UnimplementedAPIServer
 }
 
-func (n *stubPeer) ActivateAsNode(ctx context.Context, in *pubproto.ActivateAsNodeRequest) (*pubproto.ActivateAsNodeResponse, error) {
-    return &pubproto.ActivateAsNodeResponse{NodeVpnPubKey: n.peer.VPNPubKey}, n.activateErr
+func newStubPeer(publicIP string, vpnPubKey []byte) *stubPeer {
+    return &stubPeer{
+        peer: peer.Peer{PublicIP: publicIP, VPNPubKey: vpnPubKey},
+        activateAsNodeMessages: []*pubproto.ActivateAsNodeResponse{
+            {Response: &pubproto.ActivateAsNodeResponse_StateDiskUuid{StateDiskUuid: "state-disk-uuid"}},
+            {Response: &pubproto.ActivateAsNodeResponse_NodeVpnPubKey{NodeVpnPubKey: vpnPubKey}},
+        },
+        activateAsNodeReceive: 2,
+    }
+}
+
+func (n *stubPeer) ActivateAsNode(stream pubproto.API_ActivateAsNodeServer) error {
+    for _, message := range n.activateAsNodeMessages {
+        err := stream.Send(message)
+        if err != nil {
+            return err
+        }
+    }
+    for i := 0; i < n.activateAsNodeReceive; i++ {
+        _, err := stream.Recv()
+        if err != nil {
+            return err
+        }
+    }
+    if _, err := stream.Recv(); err != io.EOF {
+        return err
+    }
+
+    return n.activateErr
 }
 
 func (n *stubPeer) ActivateAsAdditionalCoordinator(ctx context.Context, in *pubproto.ActivateAsAdditionalCoordinatorRequest) (*pubproto.ActivateAsAdditionalCoordinatorResponse, error) {
@@ -74,6 +74,25 @@ func (a *API) ActivateAsAdditionalCoordinator(ctx context.Context, in *pubproto.
         return nil, status.Errorf(codes.Internal, "%v", err)
     }
 
+    // persist node state on disk
+    if err := a.core.PersistNodeState(role.Coordinator, in.OwnerId, in.ClusterId); err != nil {
+        return nil, status.Errorf(codes.Internal, "persist node state: %v", err)
+    }
+    diskUUID, err := a.core.GetDiskUUID()
+    if err != nil {
+        return nil, status.Errorf(codes.Internal, "getting disk uuid: %v", err)
+    }
+    diskKey, err := a.core.GetDataKey(ctx, diskUUID, 32)
+    if err != nil {
+        return nil, status.Errorf(codes.Internal, "getting disk key: %v", err)
+    }
+    if err := a.core.UpdateDiskPassphrase(string(diskKey)); err != nil {
+        return nil, status.Errorf(codes.Internal, "updating disk key: %v", err)
+    }
+
     // regularly get (peer) updates from etcd
     // start update before manual peer add to omit race conditions when multiple coordinator are activating nodes
 
     thisPeer, err := a.assemblePeerStruct(in.AssignedVpnIp, role.Coordinator)
     if err != nil {
         return nil, status.Errorf(codes.Internal, "assembling coordinator peer struct: %v", err)
@@ -16,18 +16,64 @@ import (
     kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
 )
 
+/*
+                   +-------------+          +-------+
+                   | coordinator |          | node  |
+                   +-------------+          +-------+
+                          |                     |
+                          | initial request     |
+                          |-------------------->|
+                          |                     | -------------------------------------------\
+                          |                     |-| update state "NodeWaitingForClusterJoin" |
+                          |                     | |------------------------------------------|
+                          |                     | ------------\
+                          |                     |-| setup VPN |
+                          |                     | |-----------|
+                          |                     | ---------------------\
+                          |                     |-| persist node state |
+                          |                     | |--------------------|
+                          |                     |
+                          | state disk uuid     |
+                          |<--------------------|
+------------------------\ |                     |
+| derive state disk key |-|                     |
+|-----------------------| |                     |
+                          |                     |
+                          | state disk key      |
+                          |-------------------->|
+                          |                     | -------------------------------\
+                          |                     |-| update state disk passphrase |
+                          |                     | |------------------------------|
+                          |                     |
+                          | VPN public key      |
+                          |<--------------------|
+                          |                     |
+*/
+
 // ActivateAsNode is the RPC call to activate a Node.
-func (a *API) ActivateAsNode(ctx context.Context, in *pubproto.ActivateAsNodeRequest) (resp *pubproto.ActivateAsNodeResponse, reterr error) {
+func (a *API) ActivateAsNode(stream pubproto.API_ActivateAsNodeServer) (reterr error) {
     a.mut.Lock()
     defer a.mut.Unlock()
 
     if err := a.core.RequireState(state.AcceptingInit); err != nil {
-        return nil, status.Errorf(codes.FailedPrecondition, "node is not in required state for activation: %v", err)
+        return status.Errorf(codes.FailedPrecondition, "node is not in required state for activation: %v", err)
     }
 
+    /*
+        coordinator -> initial request -> node
+    */
+    message, err := stream.Recv()
+    if err != nil {
+        return status.Errorf(codes.Internal, "could not receive initial request from coordinator: %v", err)
+    }
+    initialRequest, ok := message.GetRequest().(*pubproto.ActivateAsNodeRequest_InitialRequest)
+    if !ok {
+        return status.Error(codes.Internal, "expected initial request but got different message type")
+    }
+    in := initialRequest.InitialRequest
     if len(in.OwnerId) == 0 || len(in.ClusterId) == 0 {
         a.logger.Error("missing data to taint worker node as initialized")
-        return nil, status.Error(codes.InvalidArgument, "missing data to taint worker node as initialized")
+        return status.Error(codes.InvalidArgument, "missing data to taint worker node as initialized")
     }
 
     // If any of the following actions fail, we cannot revert.
@@ -42,33 +88,75 @@ func (a *API) ActivateAsNode(ctx context.Context, in *pubproto.ActivateAsNodeReq
     // This ensures the node is marked as initialzed before the node is in a state that allows code execution
     // Any new additions to ActivateAsNode MUST come after
     if err := a.core.AdvanceState(state.NodeWaitingForClusterJoin, in.OwnerId, in.ClusterId); err != nil {
-        return nil, status.Errorf(codes.Internal, "advance node state: %v", err)
+        return status.Errorf(codes.Internal, "advance node state: %v", err)
     }
 
     vpnPubKey, err := a.core.GetVPNPubKey()
     if err != nil {
-        return nil, status.Errorf(codes.Internal, "get vpn publicKey: %v", err)
+        return status.Errorf(codes.Internal, "get vpn publicKey: %v", err)
     }
 
     if err := a.core.SetVPNIP(in.NodeVpnIp); err != nil {
-        return nil, status.Errorf(codes.Internal, "setting node vpn IP address: %v", err)
+        return status.Errorf(codes.Internal, "setting node vpn IP address: %v", err)
     }
 
     // add initial peers
     if err := a.core.UpdatePeers(peer.FromPubProto(in.Peers)); err != nil {
-        return nil, status.Errorf(codes.Internal, "synchronizing peers with vpn state: %v", err)
+        return status.Errorf(codes.Internal, "synchronizing peers with vpn state: %v", err)
     }
 
     // persist node state on disk
     if err := a.core.PersistNodeState(role.Node, in.OwnerId, in.ClusterId); err != nil {
-        return nil, status.Errorf(codes.Internal, "persist node state: %v", err)
+        return status.Errorf(codes.Internal, "persist node state: %v", err)
     }
 
+    /*
+        coordinator <- state disk uuid <- node
+    */
+    diskUUID, err := a.core.GetDiskUUID()
+    if err != nil {
+        return status.Errorf(codes.Internal, "get disk uuid: %v", err)
+    }
+    if err := stream.Send(&pubproto.ActivateAsNodeResponse{
+        Response: &pubproto.ActivateAsNodeResponse_StateDiskUuid{StateDiskUuid: diskUUID},
+    }); err != nil {
+        return status.Errorf(codes.Internal, "%v", err)
+    }
+
+    /*
+        coordinator -> state disk key -> node
+    */
+    message, err = stream.Recv()
+    if err != nil {
+        return status.Errorf(codes.Internal, "could not receive state disk key from coordinator: %v", err)
+    }
+    diskKey, ok := message.GetRequest().(*pubproto.ActivateAsNodeRequest_StateDiskKey)
+    if !ok {
+        return status.Error(codes.Internal, "expected state disk key but got different message type")
+    }
+    if diskKey.StateDiskKey == nil {
+        return status.Error(codes.Internal, "empty state disk key message from coordinator")
+    }
+    if err := a.core.UpdateDiskPassphrase(string(diskKey.StateDiskKey)); err != nil {
+        return status.Errorf(codes.Internal, "%v", err)
+    }
+
     // regularly get (peer) updates from Coordinator
     a.wgClose.Add(1)
     go a.updateLoop()
 
-    return &pubproto.ActivateAsNodeResponse{NodeVpnPubKey: vpnPubKey}, nil
+    /*
+        coordinator <- VPN public key <- node
+    */
+    if err := stream.Send(&pubproto.ActivateAsNodeResponse{
+        Response: &pubproto.ActivateAsNodeResponse_NodeVpnPubKey{
+            NodeVpnPubKey: vpnPubKey,
+        },
+    }); err != nil {
+        return status.Errorf(codes.Internal, "%v", err)
+    }
+
+    return nil
 }
 
 // JoinCluster is the RPC call to request this node to join the cluster.
@@ -3,9 +3,12 @@ package pubapi
 import (
     "context"
     "errors"
+    "io"
     "net"
     "testing"
 
+    "github.com/edgelesssys/constellation/coordinator/atls"
+    "github.com/edgelesssys/constellation/coordinator/core"
     "github.com/edgelesssys/constellation/coordinator/peer"
     "github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
     "github.com/edgelesssys/constellation/coordinator/role"
@@ -16,6 +19,7 @@ import (
     "github.com/stretchr/testify/require"
     "go.uber.org/zap/zaptest"
     "google.golang.org/grpc"
+    "google.golang.org/grpc/credentials"
     kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
 )
 
@@ -25,13 +29,14 @@ func TestActivateAsNode(t *testing.T) {
     peer2 := peer.Peer{PublicIP: "192.0.2.12:2000", VPNIP: "192.0.2.22", VPNPubKey: []byte{2, 3, 4}}
 
     testCases := map[string]struct {
-        initialPeers  []peer.Peer
-        updatedPeers  []peer.Peer
-        state         state.State
-        getUpdateErr  error
-        setVPNIPErr   error
-        expectErr     bool
-        expectedState state.State
+        initialPeers            []peer.Peer
+        updatedPeers            []peer.Peer
+        state                   state.State
+        getUpdateErr            error
+        setVPNIPErr             error
+        messageSequenceOverride []string
+        expectErr               bool
+        expectedState           state.State
     }{
         "basic": {
             initialPeers: []peer.Peer{peer1},
@@ -68,6 +73,38 @@ func TestActivateAsNode(t *testing.T) {
             expectErr:     true,
             expectedState: state.Failed,
         },
+        "no messages sent to node": {
+            initialPeers:            []peer.Peer{peer1},
+            updatedPeers:            []peer.Peer{peer2},
+            state:                   state.AcceptingInit,
+            messageSequenceOverride: []string{},
+            expectErr:               true,
+            expectedState:           state.AcceptingInit,
+        },
+        "only initial message sent to node": {
+            initialPeers:            []peer.Peer{peer1},
+            updatedPeers:            []peer.Peer{peer2},
+            state:                   state.AcceptingInit,
+            messageSequenceOverride: []string{"initialRequest"},
+            expectErr:               true,
+            expectedState:           state.Failed,
+        },
+        "wrong initial message sent to node": {
+            initialPeers:            []peer.Peer{peer1},
+            updatedPeers:            []peer.Peer{peer2},
+            state:                   state.AcceptingInit,
+            messageSequenceOverride: []string{"stateDiskKey"},
+            expectErr:               true,
+            expectedState:           state.AcceptingInit,
+        },
+        "initial message sent twice to node": {
+            initialPeers:            []peer.Peer{peer1},
+            updatedPeers:            []peer.Peer{peer2},
+            state:                   state.AcceptingInit,
+            messageSequenceOverride: []string{"initialRequest", "initialRequest"},
+            expectErr:               true,
+            expectedState:           state.Failed,
+        },
     }
 
     for name, tc := range testCases {
@@ -75,16 +112,24 @@ func TestActivateAsNode(t *testing.T) {
             assert := assert.New(t)
             require := require.New(t)
 
-            const nodeVPNIP = "192.0.2.2"
+            const (
+                nodeIP    = "192.0.2.2"
+                nodeVPNIP = "10.118.0.2"
+            )
             vpnPubKey := []byte{7, 8, 9}
             ownerID := []byte("ownerID")
             clusterID := []byte("clusterID")
+            stateDiskKey := []byte("stateDiskKey")
+            messageSequence := []string{"initialRequest", "stateDiskKey"}
+            if tc.messageSequenceOverride != nil {
+                messageSequence = tc.messageSequenceOverride
+            }
 
             logger := zaptest.NewLogger(t)
-            core := &fakeCore{state: tc.state, vpnPubKey: vpnPubKey, setVPNIPErr: tc.setVPNIPErr}
+            cor := &fakeCore{state: tc.state, vpnPubKey: vpnPubKey, setVPNIPErr: tc.setVPNIPErr}
             dialer := testdialer.NewBufconnDialer()
 
-            api := New(logger, core, dialer, nil, nil, nil)
+            api := New(logger, cor, dialer, nil, nil, nil)
             defer api.Close()
 
             vserver := grpc.NewServer()
@@ -93,14 +138,15 @@ func TestActivateAsNode(t *testing.T) {
             go vserver.Serve(dialer.GetListener(net.JoinHostPort("10.118.0.1", vpnAPIPort)))
             defer vserver.GracefulStop()
 
-            resp, err := api.ActivateAsNode(context.Background(), &pubproto.ActivateAsNodeRequest{
-                NodeVpnIp: nodeVPNIP,
-                Peers:     peer.ToPubProto(tc.initialPeers),
-                OwnerId:   ownerID,
-                ClusterId: clusterID,
-            })
+            tlsConfig, err := atls.CreateAttestationServerTLSConfig(&core.MockIssuer{})
+            require.NoError(err)
+            pubserver := grpc.NewServer(grpc.Creds(credentials.NewTLS(tlsConfig)))
+            pubproto.RegisterAPIServer(pubserver, api)
+            go pubserver.Serve(dialer.GetListener(net.JoinHostPort(nodeIP, endpointAVPNPort)))
+            defer pubserver.GracefulStop()
 
-            assert.Equal(tc.expectedState, core.state)
+            _, nodeVPNPubKey, err := activateNode(require, dialer, messageSequence, nodeIP, "9000", nodeVPNIP, peer.ToPubProto(tc.initialPeers), ownerID, clusterID, stateDiskKey)
+            assert.Equal(tc.expectedState, cor.state)
 
             if tc.expectErr {
                 assert.Error(err)
@@ -108,21 +154,21 @@ func TestActivateAsNode(t *testing.T) {
             }
             require.NoError(err)
 
-            assert.Equal(vpnPubKey, resp.NodeVpnPubKey)
-            assert.Equal(nodeVPNIP, core.vpnIP)
-            assert.Equal(ownerID, core.ownerID)
-            assert.Equal(clusterID, core.clusterID)
+            assert.Equal(vpnPubKey, nodeVPNPubKey)
+            assert.Equal(nodeVPNIP, cor.vpnIP)
+            assert.Equal(ownerID, cor.ownerID)
+            assert.Equal(clusterID, cor.clusterID)
 
             api.Close() // blocks until update loop finished
 
             if tc.getUpdateErr == nil {
-                require.Len(core.updatedPeers, 2)
-                assert.Equal(tc.updatedPeers, core.updatedPeers[1])
+                require.Len(cor.updatedPeers, 2)
+                assert.Equal(tc.updatedPeers, cor.updatedPeers[1])
             } else {
-                require.Len(core.updatedPeers, 1)
+                require.Len(cor.updatedPeers, 1)
             }
-            assert.Equal(tc.initialPeers, core.updatedPeers[0])
-            assert.Equal([]role.Role{role.Node}, core.persistNodeStateRoles)
+            assert.Equal(tc.initialPeers, cor.updatedPeers[0])
+            assert.Equal([]role.Role{role.Node}, cor.persistNodeStateRoles)
         })
     }
 }
@@ -276,6 +322,83 @@ func TestJoinCluster(t *testing.T) {
     }
 }
 
+func activateNode(require *require.Assertions, dialer Dialer, messageSequence []string, nodeIP, bindPort, nodeVPNIP string, peers []*pubproto.Peer, ownerID, clusterID, stateDiskKey []byte) (string, []byte, error) {
+    ctx := context.Background()
+    conn, err := dialGRPC(ctx, dialer, net.JoinHostPort(nodeIP, bindPort))
+    require.NoError(err)
+    defer conn.Close()
+
+    client := pubproto.NewAPIClient(conn)
+    stream, err := client.ActivateAsNode(ctx)
+    if err != nil {
+        return "", nil, err
+    }
+
+    for _, message := range messageSequence {
+        switch message {
+        case "initialRequest":
+            err = stream.Send(&pubproto.ActivateAsNodeRequest{
+                Request: &pubproto.ActivateAsNodeRequest_InitialRequest{
+                    InitialRequest: &pubproto.ActivateAsNodeInitialRequest{
+                        NodeVpnIp: nodeVPNIP,
+                        Peers:     peers,
+                        OwnerId:   ownerID,
+                        ClusterId: clusterID,
+                    },
+                },
+            })
+            if err != nil {
+                return "", nil, err
+            }
+        case "stateDiskKey":
+            err = stream.Send(&pubproto.ActivateAsNodeRequest{
+                Request: &pubproto.ActivateAsNodeRequest_StateDiskKey{
+                    StateDiskKey: stateDiskKey,
+                },
+            })
+            if err != nil {
+                return "", nil, err
+            }
+        default:
+            panic("unknown message in activation")
+        }
+    }
+    require.NoError(stream.CloseSend())
+
+    diskUUIDReq, err := stream.Recv()
+    if err != nil {
+        return "", nil, err
+    }
+    diskUUID := diskUUIDReq.GetStateDiskUuid()
+
+    vpnPubKeyReq, err := stream.Recv()
+    if err != nil {
+        return "", nil, err
+    }
+    nodeVPNPubKey := vpnPubKeyReq.GetNodeVpnPubKey()
+
+    _, err = stream.Recv()
+    if err != io.EOF {
+        return "", nil, err
+    }
+
+    return diskUUID, nodeVPNPubKey, nil
+}
+
+func dialGRPC(ctx context.Context, dialer Dialer, target string) (*grpc.ClientConn, error) {
+    tlsConfig, err := atls.CreateAttestationClientTLSConfig([]atls.Validator{&core.MockValidator{}})
+    if err != nil {
+        return nil, err
+    }
+
+    return grpc.DialContext(ctx, target,
+        grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
+            return dialer.DialContext(ctx, "tcp", addr)
+        }),
+        grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)),
+    )
+}
+
 type stubVPNAPI struct {
     peers        []peer.Peer
     getUpdateErr error
(File diff suppressed because it is too large.)
@@ -7,7 +7,7 @@ option go_package = "github.com/edgelesssys/constellation/coordinator/pubapi/pub
 service API {
   rpc GetState(GetStateRequest) returns (GetStateResponse);
   rpc ActivateAsCoordinator(ActivateAsCoordinatorRequest) returns (stream ActivateAsCoordinatorResponse);
-  rpc ActivateAsNode(ActivateAsNodeRequest) returns (ActivateAsNodeResponse);
+  rpc ActivateAsNode(stream ActivateAsNodeRequest) returns (stream ActivateAsNodeResponse);
   rpc ActivateAdditionalNodes(ActivateAdditionalNodesRequest) returns (stream ActivateAdditionalNodesResponse);
   rpc ActivateAsAdditionalCoordinator(ActivateAsAdditionalCoordinatorRequest) returns (ActivateAsAdditionalCoordinatorResponse);
   rpc ActivateAdditionalCoordinator(ActivateAdditionalCoordinatorRequest) returns (ActivateAdditionalCoordinatorResponse);
@@ -44,6 +44,13 @@ message ActivateAsCoordinatorResponse {
 }
 
 message ActivateAsNodeRequest {
+  oneof request {
+    ActivateAsNodeInitialRequest initial_request = 1;
+    bytes state_disk_key = 2;
+  }
+}
+
+message ActivateAsNodeInitialRequest {
   string node_vpn_ip = 1;
   repeated Peer peers = 2;
   bytes owner_id = 3;
@@ -51,7 +58,10 @@ message ActivateAsNodeRequest {
 }
 
 message ActivateAsNodeResponse {
-  bytes node_vpn_pub_key = 1;
+  oneof response {
+    bytes node_vpn_pub_key = 1;
+    string state_disk_uuid = 2;
+  }
 }
 
 message ActivateAdditionalNodesRequest {
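Each oneof branch compiles to its own Go wrapper struct; this diff already uses all four (ActivateAsNodeRequest_InitialRequest, ActivateAsNodeRequest_StateDiskKey, ActivateAsNodeResponse_StateDiskUuid, ActivateAsNodeResponse_NodeVpnPubKey). A minimal sketch of constructing one branch and dispatching on whichever branch arrived:

    // Building a request that carries the state_disk_key branch.
    req := &pubproto.ActivateAsNodeRequest{
        Request: &pubproto.ActivateAsNodeRequest_StateDiskKey{StateDiskKey: diskKey},
    }

    // Receiving side: GetRequest returns the oneof interface; a type switch
    // picks the branch. An unset oneof yields nil.
    switch r := req.GetRequest().(type) {
    case *pubproto.ActivateAsNodeRequest_InitialRequest:
        _ = r.InitialRequest // *pubproto.ActivateAsNodeInitialRequest
    case *pubproto.ActivateAsNodeRequest_StateDiskKey:
        _ = r.StateDiskKey // []byte
    default:
        // no branch was set
    }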
@@ -20,7 +20,7 @@ const _ = grpc.SupportPackageIsVersion7
 type APIClient interface {
     GetState(ctx context.Context, in *GetStateRequest, opts ...grpc.CallOption) (*GetStateResponse, error)
     ActivateAsCoordinator(ctx context.Context, in *ActivateAsCoordinatorRequest, opts ...grpc.CallOption) (API_ActivateAsCoordinatorClient, error)
-    ActivateAsNode(ctx context.Context, in *ActivateAsNodeRequest, opts ...grpc.CallOption) (*ActivateAsNodeResponse, error)
+    ActivateAsNode(ctx context.Context, opts ...grpc.CallOption) (API_ActivateAsNodeClient, error)
     ActivateAdditionalNodes(ctx context.Context, in *ActivateAdditionalNodesRequest, opts ...grpc.CallOption) (API_ActivateAdditionalNodesClient, error)
     ActivateAsAdditionalCoordinator(ctx context.Context, in *ActivateAsAdditionalCoordinatorRequest, opts ...grpc.CallOption) (*ActivateAsAdditionalCoordinatorResponse, error)
     ActivateAdditionalCoordinator(ctx context.Context, in *ActivateAdditionalCoordinatorRequest, opts ...grpc.CallOption) (*ActivateAdditionalCoordinatorResponse, error)
@@ -79,17 +79,39 @@ func (x *aPIActivateAsCoordinatorClient) Recv() (*ActivateAsCoordinatorResponse,
     return m, nil
 }
 
-func (c *aPIClient) ActivateAsNode(ctx context.Context, in *ActivateAsNodeRequest, opts ...grpc.CallOption) (*ActivateAsNodeResponse, error) {
-    out := new(ActivateAsNodeResponse)
-    err := c.cc.Invoke(ctx, "/pubapi.API/ActivateAsNode", in, out, opts...)
+func (c *aPIClient) ActivateAsNode(ctx context.Context, opts ...grpc.CallOption) (API_ActivateAsNodeClient, error) {
+    stream, err := c.cc.NewStream(ctx, &API_ServiceDesc.Streams[1], "/pubapi.API/ActivateAsNode", opts...)
     if err != nil {
         return nil, err
     }
-    return out, nil
+    x := &aPIActivateAsNodeClient{stream}
+    return x, nil
 }
 
+type API_ActivateAsNodeClient interface {
+    Send(*ActivateAsNodeRequest) error
+    Recv() (*ActivateAsNodeResponse, error)
+    grpc.ClientStream
+}
+
+type aPIActivateAsNodeClient struct {
+    grpc.ClientStream
+}
+
+func (x *aPIActivateAsNodeClient) Send(m *ActivateAsNodeRequest) error {
+    return x.ClientStream.SendMsg(m)
+}
+
+func (x *aPIActivateAsNodeClient) Recv() (*ActivateAsNodeResponse, error) {
+    m := new(ActivateAsNodeResponse)
+    if err := x.ClientStream.RecvMsg(m); err != nil {
+        return nil, err
+    }
+    return m, nil
+}
+
 func (c *aPIClient) ActivateAdditionalNodes(ctx context.Context, in *ActivateAdditionalNodesRequest, opts ...grpc.CallOption) (API_ActivateAdditionalNodesClient, error) {
-    stream, err := c.cc.NewStream(ctx, &API_ServiceDesc.Streams[1], "/pubapi.API/ActivateAdditionalNodes", opts...)
+    stream, err := c.cc.NewStream(ctx, &API_ServiceDesc.Streams[2], "/pubapi.API/ActivateAdditionalNodes", opts...)
     if err != nil {
         return nil, err
     }
@@ -180,7 +202,7 @@ func (c *aPIClient) RequestStateDiskKey(ctx context.Context, in *RequestStateDis
 type APIServer interface {
     GetState(context.Context, *GetStateRequest) (*GetStateResponse, error)
     ActivateAsCoordinator(*ActivateAsCoordinatorRequest, API_ActivateAsCoordinatorServer) error
-    ActivateAsNode(context.Context, *ActivateAsNodeRequest) (*ActivateAsNodeResponse, error)
+    ActivateAsNode(API_ActivateAsNodeServer) error
     ActivateAdditionalNodes(*ActivateAdditionalNodesRequest, API_ActivateAdditionalNodesServer) error
     ActivateAsAdditionalCoordinator(context.Context, *ActivateAsAdditionalCoordinatorRequest) (*ActivateAsAdditionalCoordinatorResponse, error)
     ActivateAdditionalCoordinator(context.Context, *ActivateAdditionalCoordinatorRequest) (*ActivateAdditionalCoordinatorResponse, error)
@@ -201,8 +223,8 @@ func (UnimplementedAPIServer) GetState(context.Context, *GetStateRequest) (*GetS
 func (UnimplementedAPIServer) ActivateAsCoordinator(*ActivateAsCoordinatorRequest, API_ActivateAsCoordinatorServer) error {
     return status.Errorf(codes.Unimplemented, "method ActivateAsCoordinator not implemented")
 }
-func (UnimplementedAPIServer) ActivateAsNode(context.Context, *ActivateAsNodeRequest) (*ActivateAsNodeResponse, error) {
-    return nil, status.Errorf(codes.Unimplemented, "method ActivateAsNode not implemented")
+func (UnimplementedAPIServer) ActivateAsNode(API_ActivateAsNodeServer) error {
+    return status.Errorf(codes.Unimplemented, "method ActivateAsNode not implemented")
 }
 func (UnimplementedAPIServer) ActivateAdditionalNodes(*ActivateAdditionalNodesRequest, API_ActivateAdditionalNodesServer) error {
     return status.Errorf(codes.Unimplemented, "method ActivateAdditionalNodes not implemented")
@@ -277,22 +299,30 @@ func (x *aPIActivateAsCoordinatorServer) Send(m *ActivateAsCoordinatorResponse)
     return x.ServerStream.SendMsg(m)
 }
 
-func _API_ActivateAsNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-    in := new(ActivateAsNodeRequest)
-    if err := dec(in); err != nil {
+func _API_ActivateAsNode_Handler(srv interface{}, stream grpc.ServerStream) error {
+    return srv.(APIServer).ActivateAsNode(&aPIActivateAsNodeServer{stream})
+}
+
+type API_ActivateAsNodeServer interface {
+    Send(*ActivateAsNodeResponse) error
+    Recv() (*ActivateAsNodeRequest, error)
+    grpc.ServerStream
+}
+
+type aPIActivateAsNodeServer struct {
+    grpc.ServerStream
+}
+
+func (x *aPIActivateAsNodeServer) Send(m *ActivateAsNodeResponse) error {
+    return x.ServerStream.SendMsg(m)
+}
+
+func (x *aPIActivateAsNodeServer) Recv() (*ActivateAsNodeRequest, error) {
+    m := new(ActivateAsNodeRequest)
+    if err := x.ServerStream.RecvMsg(m); err != nil {
         return nil, err
     }
-    if interceptor == nil {
-        return srv.(APIServer).ActivateAsNode(ctx, in)
-    }
-    info := &grpc.UnaryServerInfo{
-        Server:     srv,
-        FullMethod: "/pubapi.API/ActivateAsNode",
-    }
-    handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-        return srv.(APIServer).ActivateAsNode(ctx, req.(*ActivateAsNodeRequest))
-    }
-    return interceptor(ctx, in, info, handler)
+    return m, nil
 }
 
 func _API_ActivateAdditionalNodes_Handler(srv interface{}, stream grpc.ServerStream) error {
@@ -435,10 +465,6 @@ var API_ServiceDesc = grpc.ServiceDesc{
         MethodName: "GetState",
         Handler:    _API_GetState_Handler,
     },
-    {
-        MethodName: "ActivateAsNode",
-        Handler:    _API_ActivateAsNode_Handler,
-    },
     {
         MethodName: "ActivateAsAdditionalCoordinator",
         Handler:    _API_ActivateAsAdditionalCoordinator_Handler,
@@ -470,6 +496,12 @@ var API_ServiceDesc = grpc.ServiceDesc{
         Handler:       _API_ActivateAsCoordinator_Handler,
         ServerStreams: true,
     },
+    {
+        StreamName:    "ActivateAsNode",
+        Handler:       _API_ActivateAsNode_Handler,
+        ServerStreams: true,
+        ClientStreams: true,
+    },
     {
         StreamName: "ActivateAdditionalNodes",
         Handler:    _API_ActivateAdditionalNodes_Handler,
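Because ActivateAsNode is now bidirectional, its registration moves from the Methods list to the Streams list of API_ServiceDesc with both ClientStreams and ServerStreams set, which shifts ActivateAdditionalNodes from Streams[1] to Streams[2] and explains the NewStream index change above. A condensed sketch of driving the regenerated client (the activateNode test helper in this commit is the complete version; initialRequest and diskKeyRequest stand for the wrapped oneof messages shown earlier):

    // Condensed client-side walkthrough of the new handshake.
    stream, err := client.ActivateAsNode(ctx)
    if err != nil {
        return err
    }
    if err := stream.Send(initialRequest); err != nil { // step 1: parameters to the node
        return err
    }
    uuidMsg, err := stream.Recv() // step 2: node's state disk UUID
    if err != nil {
        return err
    }
    _ = uuidMsg.GetStateDiskUuid()
    if err := stream.Send(diskKeyRequest); err != nil { // step 3: derived disk key
        return err
    }
    keyMsg, err := stream.Recv() // step 4: node's VPN public key
    if err != nil {
        return err
    }
    _ = keyMsg.GetNodeVpnPubKey()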
@@ -129,8 +129,8 @@ func (s *stubAPIServer) ActivateAsCoordinator(in *pubproto.ActivateAsCoordinator
     return nil
 }
 
-func (s *stubAPIServer) ActivateAsNode(ctx context.Context, in *pubproto.ActivateAsNodeRequest) (*pubproto.ActivateAsNodeResponse, error) {
-    return nil, nil
+func (s *stubAPIServer) ActivateAsNode(pubproto.API_ActivateAsNodeServer) error {
+    return nil
 }
 
 func (s *stubAPIServer) ActivateAdditionalNodes(in *pubproto.ActivateAdditionalNodesRequest, srv pubproto.API_ActivateAdditionalNodesServer) error {