Mirror of https://github.com/edgelesssys/constellation.git (synced 2025-06-06 21:59:12 -04:00)

Commit 3ce3978063 (parent 1b6ecf27ee)
update state disk passphrase on activation

Signed-off-by: Malte Poll <mp@edgeless.systems>

11 changed files with 906 additions and 389 deletions
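This commit turns ActivateAsNode from a unary RPC into a bidirectional stream: the coordinator receives the node's state disk UUID, derives a data key for that disk, sends the key back so the node can update its state disk passphrase, and only then receives the node's VPN public key. The following is a condensed, hypothetical sketch of that coordinator-side exchange, assuming the generated pubproto streaming client from the hunks below; the logging, type assertions, and error wrapping of the real activateNode are trimmed, and the function name and deriveKey callback are illustrative only, not part of the commit.

package example // illustrative only, not part of the diff

import "github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"

// activateNodeSketch shows the message order on the new bidirectional stream.
func activateNodeSketch(stream pubproto.API_ActivateAsNodeClient, initial *pubproto.ActivateAsNodeInitialRequest, deriveKey func(diskUUID string) []byte) ([]byte, error) {
	// coordinator -> initial request -> node
	if err := stream.Send(&pubproto.ActivateAsNodeRequest{
		Request: &pubproto.ActivateAsNodeRequest_InitialRequest{InitialRequest: initial},
	}); err != nil {
		return nil, err
	}
	// coordinator <- state disk uuid <- node
	msg, err := stream.Recv()
	if err != nil {
		return nil, err
	}
	// coordinator -> state disk key -> node
	if err := stream.Send(&pubproto.ActivateAsNodeRequest{
		Request: &pubproto.ActivateAsNodeRequest_StateDiskKey{StateDiskKey: deriveKey(msg.GetStateDiskUuid())},
	}); err != nil {
		return nil, err
	}
	// coordinator <- VPN public key <- node
	msg, err = stream.Recv()
	if err != nil {
		return nil, err
	}
	return msg.GetNodeVpnPubKey(), nil
}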
@@ -115,9 +115,17 @@ func TestConcurrent(t *testing.T) {
 		_ = activateCoordinator(require, dialer, coordinatorIP, bindPort, nodeIPs)
 	}
 
-	actNode := func(papi *pubapi.API) {
+	actNode := func(target string) {
 		defer wg.Done()
-		_, err := papi.ActivateAsNode(context.Background(), &pubproto.ActivateAsNodeRequest{})
+		conn, _ := dialGRPC(context.Background(), dialer, target)
+		defer conn.Close()
+		client := pubproto.NewAPIClient(conn)
+		stream, err := client.ActivateAsNode(context.Background())
+		assert.NoError(err)
+		assert.NoError(stream.Send(&pubproto.ActivateAsNodeRequest{
+			Request: &pubproto.ActivateAsNodeRequest_InitialRequest{},
+		}))
+		_, err = stream.Recv()
 		assert.Error(err)
 	}
 
@@ -165,12 +173,12 @@ func TestConcurrent(t *testing.T) {
 	wg.Add(26)
 	go actCoord()
 	go actCoord()
-	go actNode(coordPAPI)
-	go actNode(coordPAPI)
-	go actNode(nodePAPI1)
-	go actNode(nodePAPI1)
-	go actNode(nodePAPI2)
-	go actNode(nodePAPI2)
+	go actNode(net.JoinHostPort(coordinatorIP, bindPort))
+	go actNode(net.JoinHostPort(coordinatorIP, bindPort))
+	go actNode(net.JoinHostPort(nodeIPs[0], bindPort))
+	go actNode(net.JoinHostPort(nodeIPs[0], bindPort))
+	go actNode(net.JoinHostPort(nodeIPs[1], bindPort))
+	go actNode(net.JoinHostPort(nodeIPs[1], bindPort))
 	go updNode(coordPAPI, false)
 	go updNode(coordPAPI, false)
 	go updNode(nodePAPI1, true)
@@ -97,20 +97,6 @@ func TestLegacyActivateCoordinator(t *testing.T) {
 	// Coordinator cannot be activated a second time
 	assert.Error(coordinatorAPI.ActivateAsCoordinator(activationReq, testActivationSvr))
 
-	// Node cannot be activated a second time
-	nodeResp, err := nodeAPI3.ActivateAsNode(context.TODO(), &pubproto.ActivateAsNodeRequest{
-		NodeVpnIp: "192.0.2.1:9004",
-		Peers: []*pubproto.Peer{{
-			VpnPubKey: coordinatorKey,
-			PublicIp:  coordinatorIP,
-			VpnIp:     "10.118.0.1",
-		}},
-		OwnerId:   []byte("ownerID"),
-		ClusterId: []byte("clusterID"),
-	})
-	assert.Error(err)
-	assert.Nil(nodeResp)
-
 	// Assert Coordinator
 	peers := coordinatorCore.vpn.(*stubVPN).peers
 	assert.Less(3, len(peers))
@@ -117,6 +117,18 @@ func (a *API) ActivateAsCoordinator(in *pubproto.ActivateAsCoordinatorRequest, s
 	if err := a.core.PersistNodeState(role.Coordinator, ownerID, clusterID); err != nil {
 		return status.Errorf(codes.Internal, "persist node state: %v", err)
 	}
+	diskUUID, err := a.core.GetDiskUUID()
+	if err != nil {
+		return status.Errorf(codes.Internal, "getting disk uuid: %v", err)
+	}
+	diskKey, err := a.core.GetDataKey(ctx, diskUUID, 32)
+	if err != nil {
+		return status.Errorf(codes.Internal, "getting disk key: %v", err)
+	}
+	if err := a.core.UpdateDiskPassphrase(string(diskKey)); err != nil {
+		return status.Errorf(codes.Internal, "updating disk key: %v", err)
+	}
+
 	adminVPNIP, err := a.core.GetNextNodeIP()
 	if err != nil {
 		return status.Errorf(codes.Internal, "requesting node IP address: %v", err)
@@ -273,18 +285,78 @@ func (a *API) activateNode(nodePublicIP string, nodeVPNIP string, initialPeers [
 
 	client := pubproto.NewAPIClient(conn)
 
-	resp, err := client.ActivateAsNode(ctx, &pubproto.ActivateAsNodeRequest{
+	stream, err := client.ActivateAsNode(ctx)
+	if err != nil {
+		a.logger.Error("connecting to node for activation failed", zap.Error(err))
+		return nil, err
+	}
+
+	/*
+		coordinator -> initial request -> node
+	*/
+	if err := stream.Send(&pubproto.ActivateAsNodeRequest{
+		Request: &pubproto.ActivateAsNodeRequest_InitialRequest{
+			InitialRequest: &pubproto.ActivateAsNodeInitialRequest{
 				NodeVpnIp: nodeVPNIP,
 				Peers:     initialPeers,
 				OwnerId:   ownerID,
 				ClusterId: clusterID,
-	})
-	if err != nil {
-		a.logger.Error("node activation failed", zap.Error(err))
+			},
+		},
+	}); err != nil {
+		a.logger.Error("sending initial message to node for activation failed", zap.Error(err))
 		return nil, err
 	}
 
-	return resp.NodeVpnPubKey, nil
+	/*
+		coordinator <- state disk uuid <- node
+	*/
+	// wait for message containing the nodes disk UUID to send back the permanent encryption key
+	message, err := stream.Recv()
+	if err != nil {
+		a.logger.Error("expected disk UUID message but no message received", zap.Error(err))
+		return nil, err
+	}
+	diskUUID, ok := message.GetResponse().(*pubproto.ActivateAsNodeResponse_StateDiskUuid)
+	if !ok {
+		a.logger.Error("expected disk UUID message but got different message")
+		return nil, errors.New("expected state disk UUID but got different message type")
+	}
+	diskKey, err := a.core.GetDataKey(ctx, diskUUID.StateDiskUuid, 32)
+	if err != nil {
+		a.logger.Error("failed to derive node's disk key")
+		return nil, err
+	}
+
+	/*
+		coordinator -> state disk key -> node
+	*/
+	// send back state disk encryption key
+	if err := stream.Send(&pubproto.ActivateAsNodeRequest{
+		Request: &pubproto.ActivateAsNodeRequest_StateDiskKey{
+			StateDiskKey: diskKey,
+		},
+	}); err != nil {
+		a.logger.Error("sending state disk key to node on activation failed", zap.Error(err))
+		return nil, err
+	}
+
+	/*
+		coordinator <- VPN public key <- node
+	*/
+	// wait for message containing the node VPN pubkey
+	message, err = stream.Recv()
+	if err != nil {
+		a.logger.Error("expected node VPN pubkey but no message received", zap.Error(err))
+		return nil, err
+	}
+	vpnPubKey, ok := message.GetResponse().(*pubproto.ActivateAsNodeResponse_NodeVpnPubKey)
+	if !ok {
+		a.logger.Error("expected node VPN pubkey but got different message")
+		return nil, errors.New("expected node VPN pub key but got different message type")
+	}
+
+	return vpnPubKey.NodeVpnPubKey, nil
 }
 
 // assemblePeerStruct combines all information of this peer into a peer struct.
@@ -3,6 +3,7 @@ package pubapi
 import (
 	"context"
 	"errors"
+	"io"
 	"net"
 	"sync"
 	"testing"
@@ -25,9 +26,9 @@ import (
 func TestActivateAsCoordinator(t *testing.T) {
 	someErr := errors.New("failed")
 	coordinatorPubKey := []byte{6, 7, 8}
-	testNode1 := &stubPeer{peer: peer.Peer{PublicIP: "192.0.2.11", VPNPubKey: []byte{1, 2, 3}}}
-	testNode2 := &stubPeer{peer: peer.Peer{PublicIP: "192.0.2.12", VPNPubKey: []byte{2, 3, 4}}}
-	testNode3 := &stubPeer{peer: peer.Peer{PublicIP: "192.0.2.13", VPNPubKey: []byte{3, 4, 5}}}
+	testNode1 := newStubPeer("192.0.2.11", []byte{1, 2, 3})
+	testNode2 := newStubPeer("192.0.2.12", []byte{2, 3, 4})
+	testNode3 := newStubPeer("192.0.2.13", []byte{3, 4, 5})
 	expectedNode1 := peer.Peer{PublicIP: "192.0.2.11", VPNIP: "10.118.0.11", VPNPubKey: []byte{1, 2, 3}, Role: role.Node}
 	expectedNode2 := peer.Peer{PublicIP: "192.0.2.12", VPNIP: "10.118.0.12", VPNPubKey: []byte{2, 3, 4}, Role: role.Node}
 	expectedNode3 := peer.Peer{PublicIP: "192.0.2.13", VPNIP: "10.118.0.13", VPNPubKey: []byte{3, 4, 5}, Role: role.Node}
@@ -192,9 +193,9 @@ func TestActivateAsCoordinator(t *testing.T) {
 
 func TestActivateAdditionalNodes(t *testing.T) {
 	someErr := errors.New("failed")
-	testNode1 := &stubPeer{peer: peer.Peer{PublicIP: "192.0.2.11", VPNPubKey: []byte{1, 2, 3}}}
-	testNode2 := &stubPeer{peer: peer.Peer{PublicIP: "192.0.2.12", VPNPubKey: []byte{2, 3, 4}}}
-	testNode3 := &stubPeer{peer: peer.Peer{PublicIP: "192.0.2.13", VPNPubKey: []byte{3, 4, 5}}}
+	testNode1 := newStubPeer("192.0.2.11", []byte{1, 2, 3})
+	testNode2 := newStubPeer("192.0.2.12", []byte{2, 3, 4})
+	testNode3 := newStubPeer("192.0.2.13", []byte{3, 4, 5})
 	expectedNode1 := peer.Peer{PublicIP: "192.0.2.11", VPNIP: "10.118.0.11", VPNPubKey: []byte{1, 2, 3}, Role: role.Node}
 	expectedNode2 := peer.Peer{PublicIP: "192.0.2.12", VPNIP: "10.118.0.12", VPNPubKey: []byte{2, 3, 4}, Role: role.Node}
 	expectedNode3 := peer.Peer{PublicIP: "192.0.2.13", VPNIP: "10.118.0.13", VPNPubKey: []byte{3, 4, 5}, Role: role.Node}
@@ -324,13 +325,42 @@ func TestAssemblePeerStruct(t *testing.T) {
 
 type stubPeer struct {
 	peer peer.Peer
+	activateAsNodeMessages []*pubproto.ActivateAsNodeResponse
+	activateAsNodeReceive  int
 	activateErr error
 	joinErr     error
 	pubproto.UnimplementedAPIServer
 }
 
-func (n *stubPeer) ActivateAsNode(ctx context.Context, in *pubproto.ActivateAsNodeRequest) (*pubproto.ActivateAsNodeResponse, error) {
-	return &pubproto.ActivateAsNodeResponse{NodeVpnPubKey: n.peer.VPNPubKey}, n.activateErr
+func newStubPeer(publicIP string, vpnPubKey []byte) *stubPeer {
+	return &stubPeer{
+		peer: peer.Peer{PublicIP: publicIP, VPNPubKey: vpnPubKey},
+		activateAsNodeMessages: []*pubproto.ActivateAsNodeResponse{
+			{Response: &pubproto.ActivateAsNodeResponse_StateDiskUuid{StateDiskUuid: "state-disk-uuid"}},
+			{Response: &pubproto.ActivateAsNodeResponse_NodeVpnPubKey{NodeVpnPubKey: vpnPubKey}},
+		},
+		activateAsNodeReceive: 2,
+	}
+}
+
+func (n *stubPeer) ActivateAsNode(stream pubproto.API_ActivateAsNodeServer) error {
+	for _, message := range n.activateAsNodeMessages {
+		err := stream.Send(message)
+		if err != nil {
+			return err
+		}
+	}
+	for i := 0; i < n.activateAsNodeReceive; i++ {
+		_, err := stream.Recv()
+		if err != nil {
+			return err
+		}
+	}
+	if _, err := stream.Recv(); err != io.EOF {
+		return err
+	}
+
+	return n.activateErr
 }
 
 func (n *stubPeer) ActivateAsAdditionalCoordinator(ctx context.Context, in *pubproto.ActivateAsAdditionalCoordinatorRequest) (*pubproto.ActivateAsAdditionalCoordinatorResponse, error) {
@@ -74,6 +74,25 @@ func (a *API) ActivateAsAdditionalCoordinator(ctx context.Context, in *pubproto.
 		return nil, status.Errorf(codes.Internal, "%v", err)
 	}
 
+	// persist node state on disk
+	if err := a.core.PersistNodeState(role.Coordinator, in.OwnerId, in.ClusterId); err != nil {
+		return nil, status.Errorf(codes.Internal, "persist node state: %v", err)
+	}
+	diskUUID, err := a.core.GetDiskUUID()
+	if err != nil {
+		return nil, status.Errorf(codes.Internal, "getting disk uuid: %v", err)
+	}
+	diskKey, err := a.core.GetDataKey(ctx, diskUUID, 32)
+	if err != nil {
+		return nil, status.Errorf(codes.Internal, "getting disk key: %v", err)
+	}
+	if err := a.core.UpdateDiskPassphrase(string(diskKey)); err != nil {
+		return nil, status.Errorf(codes.Internal, "updating disk key: %v", err)
+	}
+
+	// regularly get (peer) updates from etcd
+	// start update before manual peer add to omit race conditions when multiple coordinator are activating nodes
+
 	thisPeer, err := a.assemblePeerStruct(in.AssignedVpnIp, role.Coordinator)
 	if err != nil {
 		return nil, status.Errorf(codes.Internal, "assembling coordinator peer struct: %v", err)
@@ -16,18 +16,64 @@ import (
 	kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
 )
 
+/*
+	+-------------+ +-------+
+	| coordinator | | node |
+	+-------------+ +-------+
+	| |
+	| initial request |
+	|-------------------->|
+	| | -------------------------------------------\
+	| |-| update state "NodeWaitingForClusterJoin" |
+	| | |------------------------------------------|
+	| | ------------\
+	| |-| setup VPN |
+	| | |-----------|
+	| | ---------------------\
+	| |-| persist node state |
+	| | |--------------------|
+	| |
+	| state disk uuid |
+	|<--------------------|
+	------------------------\ | |
+	| derive state disk key |-| |
+	|-----------------------| | |
+	| |
+	| state disk key |
+	|-------------------->|
+	| | -------------------------------\
+	| |-| update state disk passphrase |
+	| | |------------------------------|
+	| |
+	| VPN public key |
+	|<--------------------|
+	| |
+*/
+
 // ActivateAsNode is the RPC call to activate a Node.
-func (a *API) ActivateAsNode(ctx context.Context, in *pubproto.ActivateAsNodeRequest) (resp *pubproto.ActivateAsNodeResponse, reterr error) {
+func (a *API) ActivateAsNode(stream pubproto.API_ActivateAsNodeServer) (reterr error) {
 	a.mut.Lock()
 	defer a.mut.Unlock()
 
 	if err := a.core.RequireState(state.AcceptingInit); err != nil {
-		return nil, status.Errorf(codes.FailedPrecondition, "node is not in required state for activation: %v", err)
+		return status.Errorf(codes.FailedPrecondition, "node is not in required state for activation: %v", err)
 	}
 
+	/*
+		coordinator -> initial request -> node
+	*/
+	message, err := stream.Recv()
+	if err != nil {
+		return status.Errorf(codes.Internal, "could not receive initial request from coordinator: %v", err)
+	}
+	initialRequest, ok := message.GetRequest().(*pubproto.ActivateAsNodeRequest_InitialRequest)
+	if !ok {
+		return status.Error(codes.Internal, "expected initial request but got different message type")
+	}
+	in := initialRequest.InitialRequest
 	if len(in.OwnerId) == 0 || len(in.ClusterId) == 0 {
 		a.logger.Error("missing data to taint worker node as initialized")
-		return nil, status.Error(codes.InvalidArgument, "missing data to taint worker node as initialized")
+		return status.Error(codes.InvalidArgument, "missing data to taint worker node as initialized")
 	}
 
 	// If any of the following actions fail, we cannot revert.
@@ -42,33 +88,75 @@ func (a *API) ActivateAsNode(ctx context.Context, in *pubproto.ActivateAsNodeReq
 	// This ensures the node is marked as initialzed before the node is in a state that allows code execution
 	// Any new additions to ActivateAsNode MUST come after
 	if err := a.core.AdvanceState(state.NodeWaitingForClusterJoin, in.OwnerId, in.ClusterId); err != nil {
-		return nil, status.Errorf(codes.Internal, "advance node state: %v", err)
+		return status.Errorf(codes.Internal, "advance node state: %v", err)
 	}
 
 	vpnPubKey, err := a.core.GetVPNPubKey()
 	if err != nil {
-		return nil, status.Errorf(codes.Internal, "get vpn publicKey: %v", err)
+		return status.Errorf(codes.Internal, "get vpn publicKey: %v", err)
 	}
 
 	if err := a.core.SetVPNIP(in.NodeVpnIp); err != nil {
-		return nil, status.Errorf(codes.Internal, "setting node vpn IP address: %v", err)
+		return status.Errorf(codes.Internal, "setting node vpn IP address: %v", err)
 	}
 
 	// add initial peers
 	if err := a.core.UpdatePeers(peer.FromPubProto(in.Peers)); err != nil {
-		return nil, status.Errorf(codes.Internal, "synchronizing peers with vpn state: %v", err)
+		return status.Errorf(codes.Internal, "synchronizing peers with vpn state: %v", err)
 	}
 
 	// persist node state on disk
 	if err := a.core.PersistNodeState(role.Node, in.OwnerId, in.ClusterId); err != nil {
-		return nil, status.Errorf(codes.Internal, "persist node state: %v", err)
+		return status.Errorf(codes.Internal, "persist node state: %v", err)
+	}
+
+	/*
+		coordinator <- state disk uuid <- node
+	*/
+	diskUUID, err := a.core.GetDiskUUID()
+	if err != nil {
+		return status.Errorf(codes.Internal, "get disk uuid: %v", err)
+	}
+	if err := stream.Send(&pubproto.ActivateAsNodeResponse{
+		Response: &pubproto.ActivateAsNodeResponse_StateDiskUuid{StateDiskUuid: diskUUID},
+	}); err != nil {
+		return status.Errorf(codes.Internal, "%v", err)
+	}
+
+	/*
+		coordinator -> state disk key -> node
+	*/
+	message, err = stream.Recv()
+	if err != nil {
+		return status.Errorf(codes.Internal, "could not receive state disk key from coordinator: %v", err)
+	}
+	diskKey, ok := message.GetRequest().(*pubproto.ActivateAsNodeRequest_StateDiskKey)
+	if !ok {
+		return status.Error(codes.Internal, "expected state disk key but got different message type")
+	}
+	if diskKey.StateDiskKey == nil {
+		return status.Error(codes.Internal, "empty state disk key message from coordinator")
+	}
+	if err := a.core.UpdateDiskPassphrase(string(diskKey.StateDiskKey)); err != nil {
+		return status.Errorf(codes.Internal, "%v", err)
 	}
 
 	// regularly get (peer) updates from Coordinator
 	a.wgClose.Add(1)
 	go a.updateLoop()
 
-	return &pubproto.ActivateAsNodeResponse{NodeVpnPubKey: vpnPubKey}, nil
+	/*
+		coordinator <- VPN public key <- node
+	*/
+	if err := stream.Send(&pubproto.ActivateAsNodeResponse{
+		Response: &pubproto.ActivateAsNodeResponse_NodeVpnPubKey{
+			NodeVpnPubKey: vpnPubKey,
+		},
+	}); err != nil {
+		return status.Errorf(codes.Internal, "%v", err)
+	}
+
+	return nil
 }
 
 // JoinCluster is the RPC call to request this node to join the cluster.
@@ -3,9 +3,12 @@ package pubapi
 import (
 	"context"
 	"errors"
+	"io"
 	"net"
 	"testing"
 
+	"github.com/edgelesssys/constellation/coordinator/atls"
+	"github.com/edgelesssys/constellation/coordinator/core"
 	"github.com/edgelesssys/constellation/coordinator/peer"
 	"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
 	"github.com/edgelesssys/constellation/coordinator/role"
@@ -16,6 +19,7 @@ import (
 	"github.com/stretchr/testify/require"
 	"go.uber.org/zap/zaptest"
 	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials"
 	kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
 )
 
@@ -30,6 +34,7 @@ func TestActivateAsNode(t *testing.T) {
 		state        state.State
 		getUpdateErr error
 		setVPNIPErr  error
+		messageSequenceOverride []string
 		expectErr     bool
 		expectedState state.State
 	}{
@@ -68,6 +73,38 @@ func TestActivateAsNode(t *testing.T) {
 			expectErr:     true,
 			expectedState: state.Failed,
 		},
+		"no messages sent to node": {
+			initialPeers:            []peer.Peer{peer1},
+			updatedPeers:            []peer.Peer{peer2},
+			state:                   state.AcceptingInit,
+			messageSequenceOverride: []string{},
+			expectErr:               true,
+			expectedState:           state.AcceptingInit,
+		},
+		"only initial message sent to node": {
+			initialPeers:            []peer.Peer{peer1},
+			updatedPeers:            []peer.Peer{peer2},
+			state:                   state.AcceptingInit,
+			messageSequenceOverride: []string{"initialRequest"},
+			expectErr:               true,
+			expectedState:           state.Failed,
+		},
+		"wrong initial message sent to node": {
+			initialPeers:            []peer.Peer{peer1},
+			updatedPeers:            []peer.Peer{peer2},
+			state:                   state.AcceptingInit,
+			messageSequenceOverride: []string{"stateDiskKey"},
+			expectErr:               true,
+			expectedState:           state.AcceptingInit,
+		},
+		"initial message sent twice to node": {
+			initialPeers:            []peer.Peer{peer1},
+			updatedPeers:            []peer.Peer{peer2},
+			state:                   state.AcceptingInit,
+			messageSequenceOverride: []string{"initialRequest", "initialRequest"},
+			expectErr:               true,
+			expectedState:           state.Failed,
+		},
 	}
 
 	for name, tc := range testCases {
@@ -75,16 +112,24 @@ func TestActivateAsNode(t *testing.T) {
 			assert := assert.New(t)
 			require := require.New(t)
 
-			const nodeVPNIP = "192.0.2.2"
+			const (
+				nodeIP    = "192.0.2.2"
+				nodeVPNIP = "10.118.0.2"
+			)
 			vpnPubKey := []byte{7, 8, 9}
 			ownerID := []byte("ownerID")
 			clusterID := []byte("clusterID")
+			stateDiskKey := []byte("stateDiskKey")
+			messageSequence := []string{"initialRequest", "stateDiskKey"}
+			if tc.messageSequenceOverride != nil {
+				messageSequence = tc.messageSequenceOverride
+			}
 
 			logger := zaptest.NewLogger(t)
-			core := &fakeCore{state: tc.state, vpnPubKey: vpnPubKey, setVPNIPErr: tc.setVPNIPErr}
+			cor := &fakeCore{state: tc.state, vpnPubKey: vpnPubKey, setVPNIPErr: tc.setVPNIPErr}
 			dialer := testdialer.NewBufconnDialer()
 
-			api := New(logger, core, dialer, nil, nil, nil)
+			api := New(logger, cor, dialer, nil, nil, nil)
 			defer api.Close()
 
 			vserver := grpc.NewServer()
@@ -93,14 +138,15 @@ func TestActivateAsNode(t *testing.T) {
 			go vserver.Serve(dialer.GetListener(net.JoinHostPort("10.118.0.1", vpnAPIPort)))
 			defer vserver.GracefulStop()
 
-			resp, err := api.ActivateAsNode(context.Background(), &pubproto.ActivateAsNodeRequest{
-				NodeVpnIp: nodeVPNIP,
-				Peers:     peer.ToPubProto(tc.initialPeers),
-				OwnerId:   ownerID,
-				ClusterId: clusterID,
-			})
+			tlsConfig, err := atls.CreateAttestationServerTLSConfig(&core.MockIssuer{})
+			require.NoError(err)
+			pubserver := grpc.NewServer(grpc.Creds(credentials.NewTLS(tlsConfig)))
+			pubproto.RegisterAPIServer(pubserver, api)
+			go pubserver.Serve(dialer.GetListener(net.JoinHostPort(nodeIP, endpointAVPNPort)))
+			defer pubserver.GracefulStop()
 
-			assert.Equal(tc.expectedState, core.state)
+			_, nodeVPNPubKey, err := activateNode(require, dialer, messageSequence, nodeIP, "9000", nodeVPNIP, peer.ToPubProto(tc.initialPeers), ownerID, clusterID, stateDiskKey)
+			assert.Equal(tc.expectedState, cor.state)
 
 			if tc.expectErr {
 				assert.Error(err)
@@ -108,21 +154,21 @@ func TestActivateAsNode(t *testing.T) {
 			}
 			require.NoError(err)
 
-			assert.Equal(vpnPubKey, resp.NodeVpnPubKey)
-			assert.Equal(nodeVPNIP, core.vpnIP)
-			assert.Equal(ownerID, core.ownerID)
-			assert.Equal(clusterID, core.clusterID)
+			assert.Equal(vpnPubKey, nodeVPNPubKey)
+			assert.Equal(nodeVPNIP, cor.vpnIP)
+			assert.Equal(ownerID, cor.ownerID)
+			assert.Equal(clusterID, cor.clusterID)
 
 			api.Close() // blocks until update loop finished
 
 			if tc.getUpdateErr == nil {
-				require.Len(core.updatedPeers, 2)
-				assert.Equal(tc.updatedPeers, core.updatedPeers[1])
+				require.Len(cor.updatedPeers, 2)
+				assert.Equal(tc.updatedPeers, cor.updatedPeers[1])
 			} else {
-				require.Len(core.updatedPeers, 1)
+				require.Len(cor.updatedPeers, 1)
 			}
-			assert.Equal(tc.initialPeers, core.updatedPeers[0])
-			assert.Equal([]role.Role{role.Node}, core.persistNodeStateRoles)
+			assert.Equal(tc.initialPeers, cor.updatedPeers[0])
+			assert.Equal([]role.Role{role.Node}, cor.persistNodeStateRoles)
 		})
 	}
 }
@@ -276,6 +322,83 @@ func TestJoinCluster(t *testing.T) {
 	}
 }
 
+func activateNode(require *require.Assertions, dialer Dialer, messageSequence []string, nodeIP, bindPort, nodeVPNIP string, peers []*pubproto.Peer, ownerID, clusterID, stateDiskKey []byte) (string, []byte, error) {
+	ctx := context.Background()
+	conn, err := dialGRPC(ctx, dialer, net.JoinHostPort(nodeIP, bindPort))
+	require.NoError(err)
+	defer conn.Close()
+
+	client := pubproto.NewAPIClient(conn)
+	stream, err := client.ActivateAsNode(ctx)
+	if err != nil {
+		return "", nil, err
+	}
+
+	for _, message := range messageSequence {
+		switch message {
+		case "initialRequest":
+			err = stream.Send(&pubproto.ActivateAsNodeRequest{
+				Request: &pubproto.ActivateAsNodeRequest_InitialRequest{
+					InitialRequest: &pubproto.ActivateAsNodeInitialRequest{
+						NodeVpnIp: nodeVPNIP,
+						Peers:     peers,
+						OwnerId:   ownerID,
+						ClusterId: clusterID,
+					},
+				},
+			})
+			if err != nil {
+				return "", nil, err
+			}
+		case "stateDiskKey":
+			err = stream.Send(&pubproto.ActivateAsNodeRequest{
+				Request: &pubproto.ActivateAsNodeRequest_StateDiskKey{
+					StateDiskKey: stateDiskKey,
+				},
+			})
+			if err != nil {
+				return "", nil, err
+			}
+		default:
+			panic("unknown message in activation")
+		}
+	}
+	require.NoError(stream.CloseSend())
+
+	diskUUIDReq, err := stream.Recv()
+	if err != nil {
+		return "", nil, err
+	}
+	diskUUID := diskUUIDReq.GetStateDiskUuid()
+
+	vpnPubKeyReq, err := stream.Recv()
+	if err != nil {
+		return "", nil, err
+	}
+	nodeVPNPubKey := vpnPubKeyReq.GetNodeVpnPubKey()
+
+	_, err = stream.Recv()
+	if err != io.EOF {
+		return "", nil, err
+	}
+
+	return diskUUID, nodeVPNPubKey, nil
+}
+
+func dialGRPC(ctx context.Context, dialer Dialer, target string) (*grpc.ClientConn, error) {
+	tlsConfig, err := atls.CreateAttestationClientTLSConfig([]atls.Validator{&core.MockValidator{}})
+	if err != nil {
+		return nil, err
+	}
+
+	return grpc.DialContext(ctx, target,
+		grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
+			return dialer.DialContext(ctx, "tcp", addr)
+		}),
+		grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)),
+	)
+}
+
 type stubVPNAPI struct {
 	peers        []peer.Peer
 	getUpdateErr error
File diff suppressed because it is too large
@@ -7,7 +7,7 @@ option go_package = "github.com/edgelesssys/constellation/coordinator/pubapi/pub
 service API {
   rpc GetState(GetStateRequest) returns (GetStateResponse);
   rpc ActivateAsCoordinator(ActivateAsCoordinatorRequest) returns (stream ActivateAsCoordinatorResponse);
-  rpc ActivateAsNode(ActivateAsNodeRequest) returns (ActivateAsNodeResponse);
+  rpc ActivateAsNode(stream ActivateAsNodeRequest) returns (stream ActivateAsNodeResponse);
   rpc ActivateAdditionalNodes(ActivateAdditionalNodesRequest) returns (stream ActivateAdditionalNodesResponse);
   rpc ActivateAsAdditionalCoordinator(ActivateAsAdditionalCoordinatorRequest) returns (ActivateAsAdditionalCoordinatorResponse);
   rpc ActivateAdditionalCoordinator(ActivateAdditionalCoordinatorRequest) returns (ActivateAdditionalCoordinatorResponse);
@@ -44,6 +44,13 @@ message ActivateAsCoordinatorResponse {
 }
 
 message ActivateAsNodeRequest {
+  oneof request {
+    ActivateAsNodeInitialRequest initial_request = 1;
+    bytes state_disk_key = 2;
+  }
+}
+
+message ActivateAsNodeInitialRequest {
   string node_vpn_ip = 1;
   repeated Peer peers = 2;
   bytes owner_id = 3;
@@ -51,7 +58,10 @@ message ActivateAsNodeRequest {
 }
 
 message ActivateAsNodeResponse {
+  oneof response {
     bytes node_vpn_pub_key = 1;
+    string state_disk_uuid = 2;
+  }
 }
 
 message ActivateAdditionalNodesRequest {
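These oneof fields are what the Go code in this diff type-asserts on: for each case, protoc generates a wrapper struct (ActivateAsNodeRequest_InitialRequest, ActivateAsNodeRequest_StateDiskKey, ActivateAsNodeResponse_StateDiskUuid, ActivateAsNodeResponse_NodeVpnPubKey) plus GetRequest/GetResponse accessors. A small, hypothetical helper showing the usual dispatch pattern; the helper itself is not part of the commit.

package example // illustrative only, not part of the diff

import (
	"fmt"

	"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
)

// handleRequest switches on the concrete wrapper type behind the oneof.
func handleRequest(in *pubproto.ActivateAsNodeRequest) error {
	switch req := in.GetRequest().(type) {
	case *pubproto.ActivateAsNodeRequest_InitialRequest:
		fmt.Println("initial request for VPN IP", req.InitialRequest.NodeVpnIp)
	case *pubproto.ActivateAsNodeRequest_StateDiskKey:
		fmt.Println("received a state disk key of", len(req.StateDiskKey), "bytes")
	default:
		return fmt.Errorf("unexpected message type %T", req)
	}
	return nil
}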
@@ -20,7 +20,7 @@ const _ = grpc.SupportPackageIsVersion7
 type APIClient interface {
 	GetState(ctx context.Context, in *GetStateRequest, opts ...grpc.CallOption) (*GetStateResponse, error)
 	ActivateAsCoordinator(ctx context.Context, in *ActivateAsCoordinatorRequest, opts ...grpc.CallOption) (API_ActivateAsCoordinatorClient, error)
-	ActivateAsNode(ctx context.Context, in *ActivateAsNodeRequest, opts ...grpc.CallOption) (*ActivateAsNodeResponse, error)
+	ActivateAsNode(ctx context.Context, opts ...grpc.CallOption) (API_ActivateAsNodeClient, error)
 	ActivateAdditionalNodes(ctx context.Context, in *ActivateAdditionalNodesRequest, opts ...grpc.CallOption) (API_ActivateAdditionalNodesClient, error)
 	ActivateAsAdditionalCoordinator(ctx context.Context, in *ActivateAsAdditionalCoordinatorRequest, opts ...grpc.CallOption) (*ActivateAsAdditionalCoordinatorResponse, error)
 	ActivateAdditionalCoordinator(ctx context.Context, in *ActivateAdditionalCoordinatorRequest, opts ...grpc.CallOption) (*ActivateAdditionalCoordinatorResponse, error)
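The regenerated signatures above follow from marking both directions of ActivateAsNode as stream in the .proto, which makes protoc-gen-go-grpc emit a bidirectional-streaming client and server instead of the unary ones. As a generic illustration of what a handler for such a method looks like, here is a minimal, hypothetical server; it is not Constellation's actual handler (that is the node's ActivateAsNode earlier in this diff).

package example // illustrative only, not part of the diff

import "github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"

// exampleServer only demonstrates the handler shape of a bidirectional stream.
type exampleServer struct {
	pubproto.UnimplementedAPIServer
}

func (s *exampleServer) ActivateAsNode(stream pubproto.API_ActivateAsNodeServer) error {
	// The handler, not gRPC, decides the conversation order: it can wait for a
	// request, answer, and wait again, which is what lets the node report its
	// disk UUID before the coordinator sends back the disk key.
	msg, err := stream.Recv()
	if err != nil {
		return err
	}
	_ = msg.GetRequest() // dispatch on the oneof here
	return stream.Send(&pubproto.ActivateAsNodeResponse{})
}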
@@ -79,17 +79,39 @@ func (x *aPIActivateAsCoordinatorClient) Recv() (*ActivateAsCoordinatorResponse,
 	return m, nil
 }
 
-func (c *aPIClient) ActivateAsNode(ctx context.Context, in *ActivateAsNodeRequest, opts ...grpc.CallOption) (*ActivateAsNodeResponse, error) {
-	out := new(ActivateAsNodeResponse)
-	err := c.cc.Invoke(ctx, "/pubapi.API/ActivateAsNode", in, out, opts...)
+func (c *aPIClient) ActivateAsNode(ctx context.Context, opts ...grpc.CallOption) (API_ActivateAsNodeClient, error) {
+	stream, err := c.cc.NewStream(ctx, &API_ServiceDesc.Streams[1], "/pubapi.API/ActivateAsNode", opts...)
 	if err != nil {
 		return nil, err
 	}
-	return out, nil
+	x := &aPIActivateAsNodeClient{stream}
+	return x, nil
+}
+
+type API_ActivateAsNodeClient interface {
+	Send(*ActivateAsNodeRequest) error
+	Recv() (*ActivateAsNodeResponse, error)
+	grpc.ClientStream
+}
+
+type aPIActivateAsNodeClient struct {
+	grpc.ClientStream
+}
+
+func (x *aPIActivateAsNodeClient) Send(m *ActivateAsNodeRequest) error {
+	return x.ClientStream.SendMsg(m)
+}
+
+func (x *aPIActivateAsNodeClient) Recv() (*ActivateAsNodeResponse, error) {
+	m := new(ActivateAsNodeResponse)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
 }
 
 func (c *aPIClient) ActivateAdditionalNodes(ctx context.Context, in *ActivateAdditionalNodesRequest, opts ...grpc.CallOption) (API_ActivateAdditionalNodesClient, error) {
-	stream, err := c.cc.NewStream(ctx, &API_ServiceDesc.Streams[1], "/pubapi.API/ActivateAdditionalNodes", opts...)
+	stream, err := c.cc.NewStream(ctx, &API_ServiceDesc.Streams[2], "/pubapi.API/ActivateAdditionalNodes", opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -180,7 +202,7 @@ func (c *aPIClient) RequestStateDiskKey(ctx context.Context, in *RequestStateDis
 type APIServer interface {
 	GetState(context.Context, *GetStateRequest) (*GetStateResponse, error)
 	ActivateAsCoordinator(*ActivateAsCoordinatorRequest, API_ActivateAsCoordinatorServer) error
-	ActivateAsNode(context.Context, *ActivateAsNodeRequest) (*ActivateAsNodeResponse, error)
+	ActivateAsNode(API_ActivateAsNodeServer) error
 	ActivateAdditionalNodes(*ActivateAdditionalNodesRequest, API_ActivateAdditionalNodesServer) error
 	ActivateAsAdditionalCoordinator(context.Context, *ActivateAsAdditionalCoordinatorRequest) (*ActivateAsAdditionalCoordinatorResponse, error)
 	ActivateAdditionalCoordinator(context.Context, *ActivateAdditionalCoordinatorRequest) (*ActivateAdditionalCoordinatorResponse, error)
@@ -201,8 +223,8 @@ func (UnimplementedAPIServer) GetState(context.Context, *GetStateRequest) (*GetS
 func (UnimplementedAPIServer) ActivateAsCoordinator(*ActivateAsCoordinatorRequest, API_ActivateAsCoordinatorServer) error {
 	return status.Errorf(codes.Unimplemented, "method ActivateAsCoordinator not implemented")
 }
-func (UnimplementedAPIServer) ActivateAsNode(context.Context, *ActivateAsNodeRequest) (*ActivateAsNodeResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method ActivateAsNode not implemented")
+func (UnimplementedAPIServer) ActivateAsNode(API_ActivateAsNodeServer) error {
+	return status.Errorf(codes.Unimplemented, "method ActivateAsNode not implemented")
 }
 func (UnimplementedAPIServer) ActivateAdditionalNodes(*ActivateAdditionalNodesRequest, API_ActivateAdditionalNodesServer) error {
 	return status.Errorf(codes.Unimplemented, "method ActivateAdditionalNodes not implemented")
@@ -277,22 +299,30 @@ func (x *aPIActivateAsCoordinatorServer) Send(m *ActivateAsCoordinatorResponse)
 	return x.ServerStream.SendMsg(m)
 }
 
-func _API_ActivateAsNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(ActivateAsNodeRequest)
-	if err := dec(in); err != nil {
+func _API_ActivateAsNode_Handler(srv interface{}, stream grpc.ServerStream) error {
+	return srv.(APIServer).ActivateAsNode(&aPIActivateAsNodeServer{stream})
+}
+
+type API_ActivateAsNodeServer interface {
+	Send(*ActivateAsNodeResponse) error
+	Recv() (*ActivateAsNodeRequest, error)
+	grpc.ServerStream
+}
+
+type aPIActivateAsNodeServer struct {
+	grpc.ServerStream
+}
+
+func (x *aPIActivateAsNodeServer) Send(m *ActivateAsNodeResponse) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func (x *aPIActivateAsNodeServer) Recv() (*ActivateAsNodeRequest, error) {
+	m := new(ActivateAsNodeRequest)
+	if err := x.ServerStream.RecvMsg(m); err != nil {
 		return nil, err
 	}
-	if interceptor == nil {
-		return srv.(APIServer).ActivateAsNode(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/pubapi.API/ActivateAsNode",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(APIServer).ActivateAsNode(ctx, req.(*ActivateAsNodeRequest))
-	}
-	return interceptor(ctx, in, info, handler)
+	return m, nil
 }
 
 func _API_ActivateAdditionalNodes_Handler(srv interface{}, stream grpc.ServerStream) error {
@@ -435,10 +465,6 @@ var API_ServiceDesc = grpc.ServiceDesc{
 			MethodName: "GetState",
 			Handler:    _API_GetState_Handler,
 		},
-		{
-			MethodName: "ActivateAsNode",
-			Handler:    _API_ActivateAsNode_Handler,
-		},
 		{
 			MethodName: "ActivateAsAdditionalCoordinator",
 			Handler:    _API_ActivateAsAdditionalCoordinator_Handler,
@@ -470,6 +496,12 @@ var API_ServiceDesc = grpc.ServiceDesc{
 			Handler:       _API_ActivateAsCoordinator_Handler,
 			ServerStreams: true,
 		},
+		{
+			StreamName:    "ActivateAsNode",
+			Handler:       _API_ActivateAsNode_Handler,
+			ServerStreams: true,
+			ClientStreams: true,
+		},
 		{
 			StreamName: "ActivateAdditionalNodes",
 			Handler:    _API_ActivateAdditionalNodes_Handler,
@@ -129,8 +129,8 @@ func (s *stubAPIServer) ActivateAsCoordinator(in *pubproto.ActivateAsCoordinator
 	return nil
 }
 
-func (s *stubAPIServer) ActivateAsNode(ctx context.Context, in *pubproto.ActivateAsNodeRequest) (*pubproto.ActivateAsNodeResponse, error) {
-	return nil, nil
+func (s *stubAPIServer) ActivateAsNode(pubproto.API_ActivateAsNodeServer) error {
+	return nil
 }
 
 func (s *stubAPIServer) ActivateAdditionalNodes(in *pubproto.ActivateAdditionalNodesRequest, srv pubproto.API_ActivateAdditionalNodesServer) error {