Co-authored-by: Malte Poll <mp@edgeless.systems>
Co-authored-by: katexochen <katexochen@users.noreply.github.com>
Co-authored-by: Daniel Weiße <dw@edgeless.systems>
Co-authored-by: Thomas Tendyck <tt@edgeless.systems>
Co-authored-by: Benedict Schlueter <bs@edgeless.systems>
Co-authored-by: leongross <leon.gross@rub.de>
Co-authored-by: Moritz Eckert <m1gh7ym0@gmail.com>
Leonard Cohnen 2022-03-22 16:03:15 +01:00
commit 2d8fcd9bf4
362 changed files with 50980 additions and 0 deletions

coordinator/pubapi/coord.go Normal file

@@ -0,0 +1,299 @@
package pubapi
import (
"context"
"fmt"
"net"
"time"
"github.com/edgelesssys/constellation/coordinator/peer"
"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
"github.com/edgelesssys/constellation/coordinator/role"
"github.com/edgelesssys/constellation/coordinator/state"
"go.uber.org/zap"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// ActivateAsCoordinator is the RPC call to activate the Coordinator.
func (a *API) ActivateAsCoordinator(in *pubproto.ActivateAsCoordinatorRequest, srv pubproto.API_ActivateAsCoordinatorServer) (reterr error) {
a.mut.Lock()
defer a.mut.Unlock()
if err := a.core.RequireState(state.AcceptingInit); err != nil {
return status.Errorf(codes.FailedPrecondition, "%v", err)
}
if len(in.MasterSecret) == 0 {
a.logger.Error("missing master secret")
return status.Error(codes.InvalidArgument, "missing master secret")
}
// If any of the following actions fail, we cannot revert.
// Thus, mark this peer as failed.
defer func() {
if reterr != nil {
_ = a.core.AdvanceState(state.Failed, nil, nil)
}
}()
// AdvanceState MUST be called before any other functions that are not sanity checks or otherwise required.
// This ensures the peer is marked as initialized before it reaches a state that allows code execution.
// Any new additions to ActivateAsCoordinator MUST come after.
ownerID, clusterID, err := a.core.GetIDs(in.MasterSecret)
if err != nil {
return status.Errorf(codes.Internal, "%v", err)
}
if err := a.core.AdvanceState(state.ActivatingNodes, ownerID, clusterID); err != nil {
return status.Errorf(codes.Internal, "%v", err)
}
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
defer cancel()
if err := a.core.SetUpKMS(ctx, in.StorageUri, in.KmsUri, in.KeyEncryptionKeyId, in.UseExistingKek); err != nil {
return status.Errorf(codes.Internal, "%v", err)
}
coordPeer, err := a.makeCoordinatorPeer()
if err != nil {
return status.Errorf(codes.Internal, "%v", err)
}
if err := a.core.SetVPNIP(coordPeer.VPNIP); err != nil {
return status.Errorf(codes.Internal, "%v", err)
}
if err := a.core.AddPeer(coordPeer); err != nil {
return status.Errorf(codes.Internal, "%v", err)
}
kubeconfig, err := a.core.InitCluster(in.AutoscalingNodeGroups)
if err != nil {
return status.Errorf(codes.Internal, "%v", err)
}
// run the VPN-API server
if err := a.vpnAPIServer.Listen(net.JoinHostPort(coordPeer.VPNIP, vpnAPIPort)); err != nil {
return status.Errorf(codes.Internal, "%v", err)
}
a.wgClose.Add(1)
go func() {
defer a.wgClose.Done()
if err := a.vpnAPIServer.Serve(); err != nil {
panic(err)
}
}()
logToCLI := a.newLogToCLIFunc(func(msg string) error {
return srv.Send(&pubproto.ActivateAsCoordinatorResponse{
Content: &pubproto.ActivateAsCoordinatorResponse_Log{
Log: &pubproto.Log{
Message: msg,
},
},
})
})
// TODO: check performance and maybe make concurrent
if err := a.activateNodes(logToCLI, in.NodePublicEndpoints, coordPeer); err != nil {
a.logger.Error("node activation failed", zap.Error(err))
return status.Errorf(codes.Internal, "%v", err)
}
if err := a.core.SwitchToPersistentStore(); err != nil {
return status.Errorf(codes.Internal, "%v", err)
}
// This effectively gives code execution, so we do this last.
adminVPNIP, err := a.core.AddAdmin(in.AdminVpnPubKey)
if err != nil {
return status.Errorf(codes.Internal, "%v", err)
}
return srv.Send(&pubproto.ActivateAsCoordinatorResponse{
Content: &pubproto.ActivateAsCoordinatorResponse_AdminConfig{
AdminConfig: &pubproto.AdminConfig{
AdminVpnIp: adminVPNIP,
CoordinatorVpnPubKey: coordPeer.VPNPubKey,
Kubeconfig: kubeconfig,
OwnerId: ownerID,
ClusterId: clusterID,
},
},
})
}
// ActivateAdditionalNodes is the RPC call to activate additional nodes.
func (a *API) ActivateAdditionalNodes(in *pubproto.ActivateAdditionalNodesRequest, srv pubproto.API_ActivateAdditionalNodesServer) error {
if err := a.core.RequireState(state.ActivatingNodes); err != nil {
return status.Errorf(codes.FailedPrecondition, "%v", err)
}
coordPeer, err := a.makeCoordinatorPeer()
if err != nil {
return status.Errorf(codes.Internal, "%v", err)
}
logToCLI := a.newLogToCLIFunc(func(msg string) error {
return srv.Send(&pubproto.ActivateAdditionalNodesResponse{
Log: &pubproto.Log{
Message: msg,
},
})
})
// TODO: check performance and maybe make concurrent
if err := a.activateNodes(logToCLI, in.NodePublicEndpoints, coordPeer); err != nil {
a.logger.Error("node activation failed", zap.Error(err))
return status.Errorf(codes.Internal, "%v", err)
}
return srv.Send(&pubproto.ActivateAdditionalNodesResponse{
Log: &pubproto.Log{
Message: "success",
},
})
}
func (a *API) activateNodes(logToCLI logFunc, nodePublicEndpoints []string, coordPeer peer.Peer) error {
// Create initial peer data to be sent to the nodes. Currently, this is just this Coordinator.
initialPeers := peer.ToPubProto([]peer.Peer{coordPeer})
ownerID, clusterID, err := a.core.GetIDs(nil)
if err != nil {
return err
}
// Activate all nodes.
for num, nodePublicEndpoint := range nodePublicEndpoints {
logToCLI("activating node %3d out of %3d nodes", num+1, len(nodePublicEndpoints))
nodeVPNIP, err := a.core.GenerateNextIP()
if err != nil {
a.logger.Error("generation of vpn ips failed", zap.Error(err))
return err
}
nodeVpnPubKey, err := a.activateNode(nodePublicEndpoint, nodeVPNIP, initialPeers, ownerID, clusterID)
if err != nil {
return err
}
peer := peer.Peer{
PublicEndpoint: nodePublicEndpoint,
VPNIP: nodeVPNIP,
VPNPubKey: nodeVpnPubKey,
Role: role.Node,
}
if err := a.core.AddPeer(peer); err != nil {
return err
}
if err := a.joinCluster(nodePublicEndpoint); err != nil {
return err
}
}
// Manually trigger an update operation on all nodes.
// This may become unnecessary in the future, depending on whether it's acceptable that it takes
// a few seconds until the nodes receive all peer data via their regular update requests.
_, peers, err := a.core.GetPeers(0)
if err != nil {
return err
}
for _, p := range peers {
if p.Role == role.Node {
if err := a.triggerNodeUpdate(p.PublicEndpoint); err != nil {
a.logger.DPanic("TriggerNodeUpdate failed", zap.Error(err))
}
}
}
return nil
}
func (a *API) activateNode(nodePublicEndpoint string, nodeVPNIP string, initialPeers []*pubproto.Peer, ownerID, clusterID []byte) ([]byte, error) {
ctx, cancel := context.WithTimeout(context.Background(), deadlineDuration)
defer cancel()
conn, err := a.dial(ctx, nodePublicEndpoint)
if err != nil {
return nil, err
}
defer conn.Close()
client := pubproto.NewAPIClient(conn)
resp, err := client.ActivateAsNode(ctx, &pubproto.ActivateAsNodeRequest{
NodeVpnIp: nodeVPNIP,
Peers: initialPeers,
OwnerId: ownerID,
ClusterId: clusterID,
})
if err != nil {
a.logger.Error("node activation failed", zap.Error(err))
return nil, err
}
return resp.NodeVpnPubKey, nil
}
func (a *API) makeCoordinatorPeer() (peer.Peer, error) {
coordinatorVPNPubKey, err := a.core.GetVPNPubKey()
if err != nil {
a.logger.Error("could not get key", zap.Error(err))
return peer.Peer{}, err
}
coordinatorPublicIP, err := a.getPublicIPAddr()
if err != nil {
a.logger.Error("could not get public IP", zap.Error(err))
return peer.Peer{}, err
}
return peer.Peer{
PublicEndpoint: net.JoinHostPort(coordinatorPublicIP, endpointAVPNPort),
VPNIP: a.core.GetCoordinatorVPNIP(),
VPNPubKey: coordinatorVPNPubKey,
Role: role.Coordinator,
}, nil
}
func (a *API) newLogToCLIFunc(send func(string) error) logFunc {
return func(format string, v ...interface{}) {
if err := send(fmt.Sprintf(format, v...)); err != nil {
a.logger.Error("logging to CLI failed", zap.Error(err))
}
}
}
func (a *API) joinCluster(nodePublicEndpoint string) error {
ctx, cancel := context.WithTimeout(context.Background(), deadlineDuration)
defer cancel()
// We don't verify the peer certificate here, since JoinCluster triggers a connection over the VPN.
// The target of the RPC must already be part of the VPN to process the request, so it is trusted.
conn, err := a.dialNoVerify(ctx, nodePublicEndpoint)
if err != nil {
return err
}
defer conn.Close()
client := pubproto.NewAPIClient(conn)
_, err = client.JoinCluster(ctx, &pubproto.JoinClusterRequest{})
return err
}
func (a *API) triggerNodeUpdate(nodePublicEndpoint string) error {
ctx, cancel := context.WithTimeout(context.Background(), deadlineDuration)
defer cancel()
// We don't verify the peer certificate here, since TriggerNodeUpdate triggers a connection over the VPN.
// The target of the RPC must already be part of the VPN to process the request, so it is trusted.
conn, err := a.dialNoVerify(ctx, nodePublicEndpoint)
if err != nil {
return err
}
defer conn.Close()
client := pubproto.NewAPIClient(conn)
_, err = client.TriggerNodeUpdate(ctx, &pubproto.TriggerNodeUpdateRequest{})
return err
}
type logFunc func(format string, v ...interface{})
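
For reference, the response stream defined above interleaves Log messages with a final AdminConfig. A minimal, hypothetical caller sketch (not part of this commit; the helper name is made up and aTLS connection setup is assumed to happen elsewhere) that drains the stream could look like this:

package main // hypothetical caller sketch, not part of this commit

import (
	"context"
	"fmt"

	"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
	"google.golang.org/grpc"
)

// receiveActivationResult drains the ActivateAsCoordinator stream: it prints
// each Log message and returns once the final AdminConfig arrives.
// conn is assumed to be an already established, attested *grpc.ClientConn.
func receiveActivationResult(ctx context.Context, conn *grpc.ClientConn, req *pubproto.ActivateAsCoordinatorRequest) (*pubproto.AdminConfig, error) {
	client := pubproto.NewAPIClient(conn)
	stream, err := client.ActivateAsCoordinator(ctx, req)
	if err != nil {
		return nil, err
	}
	for {
		resp, err := stream.Recv()
		if err != nil {
			return nil, err // includes io.EOF if the stream ends without an AdminConfig
		}
		if log := resp.GetLog(); log != nil {
			fmt.Println(log.Message)
			continue
		}
		if cfg := resp.GetAdminConfig(); cfg != nil {
			return cfg, nil
		}
	}
}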


@@ -0,0 +1,375 @@
package pubapi
import (
"context"
"errors"
"net"
"testing"
"github.com/edgelesssys/constellation/coordinator/atls"
"github.com/edgelesssys/constellation/coordinator/kms"
"github.com/edgelesssys/constellation/coordinator/oid"
"github.com/edgelesssys/constellation/coordinator/peer"
"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
"github.com/edgelesssys/constellation/coordinator/role"
"github.com/edgelesssys/constellation/coordinator/state"
"github.com/edgelesssys/constellation/coordinator/util/testdialer"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/zap/zaptest"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
)
func TestActivateAsCoordinator(t *testing.T) {
someErr := errors.New("failed")
coordinatorPubKey := []byte{6, 7, 8}
testNode1 := &stubNode{publicIP: "192.0.2.11", pubKey: []byte{1, 2, 3}}
testNode2 := &stubNode{publicIP: "192.0.2.12", pubKey: []byte{2, 3, 4}}
testNode3 := &stubNode{publicIP: "192.0.2.13", pubKey: []byte{3, 4, 5}}
expectedNode1 := peer.Peer{PublicEndpoint: "192.0.2.11:9000", VPNIP: "192.0.2.101", VPNPubKey: []byte{1, 2, 3}, Role: role.Node}
expectedNode2 := peer.Peer{PublicEndpoint: "192.0.2.12:9000", VPNIP: "192.0.2.102", VPNPubKey: []byte{2, 3, 4}, Role: role.Node}
expectedNode3 := peer.Peer{PublicEndpoint: "192.0.2.13:9000", VPNIP: "192.0.2.103", VPNPubKey: []byte{3, 4, 5}, Role: role.Node}
expectedCoord := peer.Peer{PublicEndpoint: "192.0.2.1:9000", VPNIP: "192.0.2.100", VPNPubKey: coordinatorPubKey, Role: role.Coordinator}
testCases := map[string]struct {
nodes []*stubNode
state state.State
switchToPersistentStoreErr error
expectErr bool
expectedPeers []peer.Peer
expectedState state.State
}{
"0 nodes": {
state: state.AcceptingInit,
expectedPeers: []peer.Peer{expectedCoord},
expectedState: state.ActivatingNodes,
},
"1 node": {
nodes: []*stubNode{testNode1},
state: state.AcceptingInit,
expectedPeers: []peer.Peer{expectedCoord, expectedNode1},
expectedState: state.ActivatingNodes,
},
"2 nodes": {
nodes: []*stubNode{testNode1, testNode2},
state: state.AcceptingInit,
expectedPeers: []peer.Peer{expectedCoord, expectedNode1, expectedNode2},
expectedState: state.ActivatingNodes,
},
"3 nodes": {
nodes: []*stubNode{testNode1, testNode2, testNode3},
state: state.AcceptingInit,
expectedPeers: []peer.Peer{expectedCoord, expectedNode1, expectedNode2, expectedNode3},
expectedState: state.ActivatingNodes,
},
"already activated": {
nodes: []*stubNode{testNode1},
state: state.ActivatingNodes,
expectErr: true,
expectedState: state.ActivatingNodes,
},
"wrong peer kind": {
nodes: []*stubNode{testNode1},
state: state.IsNode,
expectErr: true,
expectedState: state.IsNode,
},
"node activation error": {
nodes: []*stubNode{testNode1, {activateErr: someErr}, testNode3},
state: state.AcceptingInit,
expectErr: true,
expectedState: state.Failed,
},
"node join error": {
nodes: []*stubNode{testNode1, {joinErr: someErr}, testNode3},
state: state.AcceptingInit,
expectErr: true,
expectedState: state.Failed,
},
"SwitchToPersistentStore error": {
nodes: []*stubNode{testNode1},
state: state.AcceptingInit,
switchToPersistentStoreErr: someErr,
expectErr: true,
expectedState: state.Failed,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
adminPubKey := []byte{7, 8, 9}
autoscalingNodeGroups := []string{"ang1", "ang2"}
keyEncryptionKeyID := "constellation"
core := &fakeCore{
state: tc.state,
vpnPubKey: coordinatorPubKey,
switchToPersistentStoreErr: tc.switchToPersistentStoreErr,
kubeconfig: []byte("kubeconfig"),
ownerID: []byte("ownerID"),
clusterID: []byte("clusterID"),
}
dialer := testdialer.NewBufconnDialer()
getPublicIPAddr := func() (string, error) {
return "192.0.2.1", nil
}
api := New(zaptest.NewLogger(t), core, dialer, stubVPNAPIServer{}, fakeValidator{}, getPublicIPAddr)
// spawn nodes
var nodePublicEndpoints []string
for _, n := range tc.nodes {
publicEndpoint := net.JoinHostPort(n.publicIP, endpointAVPNPort)
nodePublicEndpoints = append(nodePublicEndpoints, publicEndpoint)
server := n.newServer()
go server.Serve(dialer.GetListener(publicEndpoint))
defer server.GracefulStop()
}
stream := &stubActivateAsCoordinatorServer{}
err := api.ActivateAsCoordinator(&pubproto.ActivateAsCoordinatorRequest{
AdminVpnPubKey: adminPubKey,
NodePublicEndpoints: nodePublicEndpoints,
AutoscalingNodeGroups: autoscalingNodeGroups,
MasterSecret: []byte("Constellation"),
KeyEncryptionKeyId: keyEncryptionKeyID,
UseExistingKek: false,
KmsUri: kms.ClusterKMSURI,
StorageUri: kms.NoStoreURI,
}, stream)
assert.Equal(tc.expectedState, core.state)
if tc.expectErr {
assert.Error(err)
return
}
require.NoError(err)
// Coordinator streams logs and admin conf
require.Len(stream.sent, len(tc.nodes)+1)
for i := 0; i < len(tc.nodes); i++ {
assert.NotEmpty(stream.sent[i].GetLog().Message)
}
adminConfig := stream.sent[len(tc.nodes)].GetAdminConfig()
assert.Equal("192.0.2.99", adminConfig.AdminVpnIp)
assert.Equal(coordinatorPubKey, adminConfig.CoordinatorVpnPubKey)
assert.Equal(core.kubeconfig, adminConfig.Kubeconfig)
assert.Equal(core.ownerID, adminConfig.OwnerId)
assert.Equal(core.clusterID, adminConfig.ClusterId)
// Core is updated
assert.Equal(adminPubKey, core.adminPubKey)
assert.Equal(core.GetCoordinatorVPNIP(), core.vpnIP)
assert.Equal(tc.expectedPeers, core.peers)
assert.Equal(autoscalingNodeGroups, core.autoscalingNodeGroups)
assert.Equal(keyEncryptionKeyID, core.kekID)
})
}
}
func TestActivateAdditionalNodes(t *testing.T) {
someErr := errors.New("failed")
testNode1 := &stubNode{publicIP: "192.0.2.11", pubKey: []byte{1, 2, 3}}
testNode2 := &stubNode{publicIP: "192.0.2.12", pubKey: []byte{2, 3, 4}}
testNode3 := &stubNode{publicIP: "192.0.2.13", pubKey: []byte{3, 4, 5}}
expectedNode1 := peer.Peer{PublicEndpoint: "192.0.2.11:9000", VPNIP: "192.0.2.101", VPNPubKey: []byte{1, 2, 3}, Role: role.Node}
expectedNode2 := peer.Peer{PublicEndpoint: "192.0.2.12:9000", VPNIP: "192.0.2.102", VPNPubKey: []byte{2, 3, 4}, Role: role.Node}
expectedNode3 := peer.Peer{PublicEndpoint: "192.0.2.13:9000", VPNIP: "192.0.2.103", VPNPubKey: []byte{3, 4, 5}, Role: role.Node}
testCases := map[string]struct {
nodes []*stubNode
state state.State
expectErr bool
expectedPeers []peer.Peer
}{
"0 nodes": {
state: state.ActivatingNodes,
},
"1 node": {
nodes: []*stubNode{testNode1},
state: state.ActivatingNodes,
expectedPeers: []peer.Peer{expectedNode1},
},
"2 nodes": {
nodes: []*stubNode{testNode1, testNode2},
state: state.ActivatingNodes,
expectedPeers: []peer.Peer{expectedNode1, expectedNode2},
},
"3 nodes": {
nodes: []*stubNode{testNode1, testNode2, testNode3},
state: state.ActivatingNodes,
expectedPeers: []peer.Peer{expectedNode1, expectedNode2, expectedNode3},
},
"uninitialized": {
nodes: []*stubNode{testNode1},
expectErr: true,
},
"wrong peer kind": {
nodes: []*stubNode{testNode1},
state: state.IsNode,
expectErr: true,
},
"node activation error": {
nodes: []*stubNode{testNode1, {activateErr: someErr}, testNode3},
state: state.ActivatingNodes,
expectErr: true,
},
"node join error": {
nodes: []*stubNode{testNode1, {joinErr: someErr}, testNode3},
state: state.ActivatingNodes,
expectErr: true,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
core := &fakeCore{state: tc.state}
dialer := testdialer.NewBufconnDialer()
getPublicIPAddr := func() (string, error) {
return "192.0.2.1", nil
}
api := New(zaptest.NewLogger(t), core, dialer, nil, fakeValidator{}, getPublicIPAddr)
// spawn nodes
var nodePublicEndpoints []string
for _, n := range tc.nodes {
publicEndpoint := net.JoinHostPort(n.publicIP, endpointAVPNPort)
nodePublicEndpoints = append(nodePublicEndpoints, publicEndpoint)
server := n.newServer()
go server.Serve(dialer.GetListener(publicEndpoint))
defer server.GracefulStop()
}
stream := &stubActivateAdditionalNodesServer{}
err := api.ActivateAdditionalNodes(&pubproto.ActivateAdditionalNodesRequest{NodePublicEndpoints: nodePublicEndpoints}, stream)
if tc.expectErr {
assert.Error(err)
return
}
require.NoError(err)
// Coordinator streams logs
require.Len(stream.sent, len(tc.nodes)+1)
for _, s := range stream.sent {
assert.NotEmpty(s.GetLog().Message)
}
// Core is updated
assert.Equal(tc.expectedPeers, core.peers)
})
}
}
func TestMakeCoordinatorPeer(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
getPublicIPAddr := func() (string, error) {
return "192.0.2.1", nil
}
vpnPubKey := []byte{2, 3, 4}
core := &fakeCore{vpnPubKey: vpnPubKey}
api := New(zaptest.NewLogger(t), core, nil, nil, nil, getPublicIPAddr)
expected := peer.Peer{
PublicEndpoint: "192.0.2.1:9000",
VPNIP: core.GetCoordinatorVPNIP(),
VPNPubKey: vpnPubKey,
Role: role.Coordinator,
}
actual, err := api.makeCoordinatorPeer()
require.NoError(err)
assert.Equal(expected, actual)
}
type stubNode struct {
publicIP string
pubKey []byte
activateErr error
joinErr error
pubproto.UnimplementedAPIServer
}
func (n *stubNode) ActivateAsNode(ctx context.Context, in *pubproto.ActivateAsNodeRequest) (*pubproto.ActivateAsNodeResponse, error) {
return &pubproto.ActivateAsNodeResponse{NodeVpnPubKey: n.pubKey}, n.activateErr
}
func (*stubNode) TriggerNodeUpdate(ctx context.Context, in *pubproto.TriggerNodeUpdateRequest) (*pubproto.TriggerNodeUpdateResponse, error) {
return &pubproto.TriggerNodeUpdateResponse{}, nil
}
func (n *stubNode) JoinCluster(ctx context.Context, in *pubproto.JoinClusterRequest) (*pubproto.JoinClusterResponse, error) {
return &pubproto.JoinClusterResponse{}, n.joinErr
}
func (n *stubNode) newServer() *grpc.Server {
tlsConfig, err := atls.CreateAttestationServerTLSConfig(fakeIssuer{})
if err != nil {
panic(err)
}
server := grpc.NewServer(grpc.Creds(credentials.NewTLS(tlsConfig)))
pubproto.RegisterAPIServer(server, n)
return server
}
type stubVPNAPIServer struct{}
func (stubVPNAPIServer) Listen(endpoint string) error {
return nil
}
func (stubVPNAPIServer) Serve() error {
return nil
}
func (stubVPNAPIServer) Close() {
}
type fakeIssuer struct {
oid.Dummy
}
func (fakeIssuer) Issue(userData []byte, nonce []byte) ([]byte, error) {
return userData, nil
}
type fakeValidator struct {
oid.Dummy
}
func (fakeValidator) Validate(attdoc []byte, nonce []byte) ([]byte, error) {
return attdoc, nil
}
type stubActivateAsCoordinatorServer struct {
grpc.ServerStream
sent []*pubproto.ActivateAsCoordinatorResponse
}
func (s *stubActivateAsCoordinatorServer) Send(req *pubproto.ActivateAsCoordinatorResponse) error {
s.sent = append(s.sent, req)
return nil
}
type stubActivateAdditionalNodesServer struct {
grpc.ServerStream
sent []*pubproto.ActivateAdditionalNodesResponse
}
func (s *stubActivateAdditionalNodesServer) Send(req *pubproto.ActivateAdditionalNodesResponse) error {
s.sent = append(s.sent, req)
return nil
}


@@ -0,0 +1,31 @@
package pubapi
import (
"context"
"github.com/edgelesssys/constellation/coordinator/peer"
"github.com/edgelesssys/constellation/coordinator/state"
kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
)
type Core interface {
GetVPNPubKey() ([]byte, error)
SetVPNIP(string) error
GetCoordinatorVPNIP() string
AddAdmin(pubKey []byte) (string, error)
GenerateNextIP() (string, error)
SwitchToPersistentStore() error
GetIDs(masterSecret []byte) (ownerID []byte, clusterID []byte, err error)
SetUpKMS(ctx context.Context, storageURI, kmsURI, kekID string, useExisting bool) error
GetState() state.State
RequireState(...state.State) error
AdvanceState(newState state.State, ownerID, clusterID []byte) error
GetPeers(resourceVersion int) (int, []peer.Peer, error)
AddPeer(peer.Peer) error
UpdatePeers([]peer.Peer) error
InitCluster(autoscalingNodeGroups []string) ([]byte, error)
JoinCluster(kubeadm.BootstrapTokenDiscovery) error
}


@@ -0,0 +1,108 @@
package pubapi
import (
"context"
"errors"
"fmt"
"github.com/edgelesssys/constellation/coordinator/peer"
"github.com/edgelesssys/constellation/coordinator/state"
kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
)
type fakeCore struct {
vpnPubKey []byte
vpnIP string
setVPNIPErr error
adminPubKey []byte
nextIP int
switchToPersistentStoreErr error
state state.State
ownerID []byte
clusterID []byte
peers []peer.Peer
updatedPeers [][]peer.Peer
kubeconfig []byte
autoscalingNodeGroups []string
joinArgs []kubeadm.BootstrapTokenDiscovery
joinClusterErr error
kekID string
}
func (c *fakeCore) GetVPNPubKey() ([]byte, error) {
return c.vpnPubKey, nil
}
func (c *fakeCore) SetVPNIP(ip string) error {
if len(c.ownerID) == 0 || len(c.clusterID) == 0 {
return errors.New("SetVPNIP called before IDs were set")
}
c.vpnIP = ip
return c.setVPNIPErr
}
func (*fakeCore) GetCoordinatorVPNIP() string {
return "192.0.2.100"
}
func (c *fakeCore) AddAdmin(pubKey []byte) (string, error) {
c.adminPubKey = pubKey
return "192.0.2.99", nil
}
func (c *fakeCore) GenerateNextIP() (string, error) {
c.nextIP++
return fmt.Sprintf("192.0.2.%v", 100+c.nextIP), nil
}
func (c *fakeCore) SwitchToPersistentStore() error {
return c.switchToPersistentStoreErr
}
func (c *fakeCore) GetIDs(masterSecret []byte) (ownerID []byte, clusterID []byte, err error) {
return c.ownerID, c.clusterID, nil
}
func (c *fakeCore) GetState() state.State {
return c.state.Get()
}
func (c *fakeCore) RequireState(states ...state.State) error {
return c.state.Require(states...)
}
func (c *fakeCore) AdvanceState(newState state.State, ownerID, clusterID []byte) error {
c.ownerID = ownerID
c.clusterID = clusterID
c.state.Advance(newState)
return nil
}
func (*fakeCore) GetPeers(resourceVersion int) (int, []peer.Peer, error) {
return 0, nil, nil
}
func (c *fakeCore) AddPeer(peer peer.Peer) error {
c.peers = append(c.peers, peer)
return nil
}
func (c *fakeCore) UpdatePeers(peers []peer.Peer) error {
c.updatedPeers = append(c.updatedPeers, peers)
return nil
}
func (c *fakeCore) InitCluster(autoscalingNodeGroups []string) ([]byte, error) {
c.autoscalingNodeGroups = autoscalingNodeGroups
return c.kubeconfig, nil
}
func (c *fakeCore) JoinCluster(args kubeadm.BootstrapTokenDiscovery) error {
c.joinArgs = append(c.joinArgs, args)
return c.joinClusterErr
}
func (c *fakeCore) SetUpKMS(ctx context.Context, storageURI, kmsURI, kekID string, useExisting bool) error {
c.kekID = kekID
return nil
}

coordinator/pubapi/node.go Normal file

@@ -0,0 +1,163 @@
package pubapi
import (
"context"
"net"
"time"
"github.com/edgelesssys/constellation/coordinator/peer"
"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
"github.com/edgelesssys/constellation/coordinator/state"
"github.com/edgelesssys/constellation/coordinator/vpnapi/vpnproto"
"go.uber.org/zap"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
)
// ActivateAsNode is the RPC call to activate a Node.
func (a *API) ActivateAsNode(ctx context.Context, in *pubproto.ActivateAsNodeRequest) (resp *pubproto.ActivateAsNodeResponse, reterr error) {
a.mut.Lock()
defer a.mut.Unlock()
if err := a.core.RequireState(state.AcceptingInit); err != nil {
return nil, status.Errorf(codes.FailedPrecondition, "%v", err)
}
if len(in.OwnerId) == 0 || len(in.ClusterId) == 0 {
a.logger.Error("missing data to taint worker node as initialized")
return nil, status.Error(codes.InvalidArgument, "missing data to taint worker node as initialized")
}
// If any of the following actions fail, we cannot revert.
// Thus, mark this peer as failed.
defer func() {
if reterr != nil {
_ = a.core.AdvanceState(state.Failed, nil, nil)
}
}()
// AdvanceState MUST be called before any other functions that are not sanity checks or otherwise required.
// This ensures the node is marked as initialized before it reaches a state that allows code execution.
// Any new additions to ActivateAsNode MUST come after.
if err := a.core.AdvanceState(state.NodeWaitingForClusterJoin, in.OwnerId, in.ClusterId); err != nil {
return nil, status.Errorf(codes.Internal, "%v", err)
}
vpnPubKey, err := a.core.GetVPNPubKey()
if err != nil {
return nil, status.Errorf(codes.Internal, "%v", err)
}
if err := a.core.SetVPNIP(in.NodeVpnIp); err != nil {
return nil, status.Errorf(codes.Internal, "%v", err)
}
// add initial peers
if err := a.core.UpdatePeers(peer.FromPubProto(in.Peers)); err != nil {
return nil, status.Errorf(codes.Internal, "%v", err)
}
// regularly get (peer) updates from Coordinator
a.wgClose.Add(1)
go a.updateLoop()
return &pubproto.ActivateAsNodeResponse{NodeVpnPubKey: vpnPubKey}, nil
}
// JoinCluster is the RPC call to request this node to join the cluster.
func (a *API) JoinCluster(ctx context.Context, in *pubproto.JoinClusterRequest) (*pubproto.JoinClusterResponse, error) {
a.mut.Lock()
defer a.mut.Unlock()
if err := a.core.RequireState(state.NodeWaitingForClusterJoin); err != nil {
return nil, status.Errorf(codes.FailedPrecondition, "%v", err)
}
conn, err := a.dialInsecure(ctx, net.JoinHostPort(a.core.GetCoordinatorVPNIP(), vpnAPIPort))
if err != nil {
return nil, status.Errorf(codes.Unavailable, "%v", err)
}
resp, err := vpnproto.NewAPIClient(conn).GetK8SJoinArgs(ctx, &vpnproto.GetK8SJoinArgsRequest{})
conn.Close()
if err != nil {
return nil, status.Errorf(codes.Internal, "%v", err)
}
err = a.core.JoinCluster(kubeadm.BootstrapTokenDiscovery{
APIServerEndpoint: resp.ApiServerEndpoint,
Token: resp.Token,
CACertHashes: []string{resp.DiscoveryTokenCaCertHash},
})
if err != nil {
_ = a.core.AdvanceState(state.Failed, nil, nil)
return nil, status.Errorf(codes.Internal, "%v", err)
}
if err := a.core.AdvanceState(state.IsNode, nil, nil); err != nil {
return nil, status.Errorf(codes.Internal, "%v", err)
}
return &pubproto.JoinClusterResponse{}, nil
}
// TriggerNodeUpdate is the RPC call to request this node to get an update from the Coordinator.
func (a *API) TriggerNodeUpdate(ctx context.Context, in *pubproto.TriggerNodeUpdateRequest) (*pubproto.TriggerNodeUpdateResponse, error) {
if err := a.core.RequireState(state.IsNode); err != nil {
return nil, status.Errorf(codes.FailedPrecondition, "%v", err)
}
if err := a.update(ctx); err != nil {
return nil, status.Errorf(codes.Internal, "%v", err)
}
return &pubproto.TriggerNodeUpdateResponse{}, nil
}
func (a *API) updateLoop() {
defer a.wgClose.Done()
ticker := time.NewTicker(updateInterval)
for {
if err := a.update(context.Background()); err != nil {
a.logger.Error("updateLoop: update failed", zap.Error(err))
}
select {
case <-a.stopUpdate:
ticker.Stop()
return
case <-ticker.C:
}
}
}
func (a *API) update(ctx context.Context) error {
a.mut.Lock()
defer a.mut.Unlock()
ctx, cancel := context.WithTimeout(ctx, deadlineDuration)
defer cancel()
conn, err := a.dialInsecure(ctx, net.JoinHostPort(a.core.GetCoordinatorVPNIP(), vpnAPIPort))
if err != nil {
return err
}
resp, err := vpnproto.NewAPIClient(conn).GetUpdate(ctx, &vpnproto.GetUpdateRequest{ResourceVersion: int64(a.resourceVersion)})
conn.Close()
if err != nil {
return err
}
resourceVersion := int(resp.ResourceVersion)
if resourceVersion == a.resourceVersion {
return nil
}
// TODO: Does this naive approach of performing a full update every time need to be replaced by something more clever, like watches in K8s?
// https://kubernetes.io/docs/reference/using-api/api-concepts/#efficient-detection-of-changes
if err := a.core.UpdatePeers(peer.FromVPNProto(resp.Peers)); err != nil {
return err
}
a.resourceVersion = resourceVersion
return nil
}


@@ -0,0 +1,295 @@
package pubapi
import (
"context"
"errors"
"net"
"testing"
"github.com/edgelesssys/constellation/coordinator/peer"
"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
"github.com/edgelesssys/constellation/coordinator/state"
"github.com/edgelesssys/constellation/coordinator/util/testdialer"
"github.com/edgelesssys/constellation/coordinator/vpnapi/vpnproto"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/zap/zaptest"
"google.golang.org/grpc"
kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
)
func TestActivateAsNode(t *testing.T) {
someErr := errors.New("failed")
peer1 := peer.Peer{PublicEndpoint: "192.0.2.11:2000", VPNIP: "192.0.2.21", VPNPubKey: []byte{1, 2, 3}}
peer2 := peer.Peer{PublicEndpoint: "192.0.2.12:2000", VPNIP: "192.0.2.22", VPNPubKey: []byte{2, 3, 4}}
testCases := map[string]struct {
initialPeers []peer.Peer
updatedPeers []peer.Peer
state state.State
getUpdateErr error
setVPNIPErr error
expectErr bool
expectedState state.State
}{
"basic": {
initialPeers: []peer.Peer{peer1},
updatedPeers: []peer.Peer{peer2},
state: state.AcceptingInit,
expectedState: state.NodeWaitingForClusterJoin,
},
"already activated": {
initialPeers: []peer.Peer{peer1},
updatedPeers: []peer.Peer{peer2},
state: state.IsNode,
expectErr: true,
expectedState: state.IsNode,
},
"wrong peer kind": {
initialPeers: []peer.Peer{peer1},
updatedPeers: []peer.Peer{peer2},
state: state.ActivatingNodes,
expectErr: true,
expectedState: state.ActivatingNodes,
},
"GetUpdate error": {
initialPeers: []peer.Peer{peer1},
updatedPeers: []peer.Peer{peer2},
state: state.AcceptingInit,
getUpdateErr: someErr,
expectedState: state.NodeWaitingForClusterJoin,
},
"SetVPNIP error": {
initialPeers: []peer.Peer{peer1},
updatedPeers: []peer.Peer{peer2},
state: state.AcceptingInit,
setVPNIPErr: someErr,
expectErr: true,
expectedState: state.Failed,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
const nodeVPNIP = "192.0.2.2"
vpnPubKey := []byte{7, 8, 9}
ownerID := []byte("ownerID")
clusterID := []byte("clusterID")
logger := zaptest.NewLogger(t)
core := &fakeCore{state: tc.state, vpnPubKey: vpnPubKey, setVPNIPErr: tc.setVPNIPErr}
dialer := testdialer.NewBufconnDialer()
api := New(logger, core, dialer, nil, nil, nil)
defer api.Close()
vserver := grpc.NewServer()
vapi := &stubVPNAPI{peers: tc.updatedPeers, getUpdateErr: tc.getUpdateErr}
vpnproto.RegisterAPIServer(vserver, vapi)
go vserver.Serve(dialer.GetListener(net.JoinHostPort(core.GetCoordinatorVPNIP(), vpnAPIPort)))
defer vserver.GracefulStop()
resp, err := api.ActivateAsNode(context.Background(), &pubproto.ActivateAsNodeRequest{
NodeVpnIp: nodeVPNIP,
Peers: peer.ToPubProto(tc.initialPeers),
OwnerId: ownerID,
ClusterId: clusterID,
})
assert.Equal(tc.expectedState, core.state)
if tc.expectErr {
assert.Error(err)
return
}
require.NoError(err)
assert.Equal(vpnPubKey, resp.NodeVpnPubKey)
assert.Equal(nodeVPNIP, core.vpnIP)
assert.Equal(ownerID, core.ownerID)
assert.Equal(clusterID, core.clusterID)
api.Close() // blocks until update loop finished
if tc.getUpdateErr == nil {
require.Len(core.updatedPeers, 2)
assert.Equal(tc.updatedPeers, core.updatedPeers[1])
} else {
require.Len(core.updatedPeers, 1)
}
assert.Equal(tc.initialPeers, core.updatedPeers[0])
})
}
}
func TestTriggerNodeUpdate(t *testing.T) {
someErr := errors.New("failed")
peers := []peer.Peer{
{PublicEndpoint: "192.0.2.11:2000", VPNIP: "192.0.2.21", VPNPubKey: []byte{1, 2, 3}},
{PublicEndpoint: "192.0.2.12:2000", VPNIP: "192.0.2.22", VPNPubKey: []byte{2, 3, 4}},
}
testCases := map[string]struct {
peers []peer.Peer
state state.State
getUpdateErr error
expectErr bool
}{
"basic": {
peers: peers,
state: state.IsNode,
},
"not activated": {
peers: peers,
state: state.AcceptingInit,
expectErr: true,
},
"wrong peer kind": {
peers: peers,
state: state.ActivatingNodes,
expectErr: true,
},
"GetUpdate error": {
peers: peers,
state: state.IsNode,
getUpdateErr: someErr,
expectErr: true,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
logger := zaptest.NewLogger(t)
core := &fakeCore{state: tc.state}
dialer := testdialer.NewBufconnDialer()
api := New(logger, core, dialer, nil, nil, nil)
vserver := grpc.NewServer()
vapi := &stubVPNAPI{
peers: tc.peers,
getUpdateErr: tc.getUpdateErr,
}
vpnproto.RegisterAPIServer(vserver, vapi)
go vserver.Serve(dialer.GetListener(net.JoinHostPort(core.GetCoordinatorVPNIP(), vpnAPIPort)))
defer vserver.GracefulStop()
_, err := api.TriggerNodeUpdate(context.Background(), &pubproto.TriggerNodeUpdateRequest{})
if tc.expectErr {
assert.Error(err)
return
}
require.NoError(err)
// second update should be a noop
_, err = api.TriggerNodeUpdate(context.Background(), &pubproto.TriggerNodeUpdateRequest{})
require.NoError(err)
require.Len(core.updatedPeers, 1)
assert.Equal(tc.peers, core.updatedPeers[0])
})
}
}
func TestJoinCluster(t *testing.T) {
someErr := errors.New("failed")
testCases := map[string]struct {
state state.State
getJoinArgsErr error
joinClusterErr error
expectErr bool
expectedState state.State
}{
"basic": {
state: state.NodeWaitingForClusterJoin,
expectedState: state.IsNode,
},
"not activated": {
state: state.AcceptingInit,
expectErr: true,
expectedState: state.AcceptingInit,
},
"wrong peer kind": {
state: state.ActivatingNodes,
expectErr: true,
expectedState: state.ActivatingNodes,
},
"GetK8sJoinArgs error": {
state: state.NodeWaitingForClusterJoin,
getJoinArgsErr: someErr,
expectErr: true,
expectedState: state.NodeWaitingForClusterJoin,
},
"JoinCluster error": {
state: state.NodeWaitingForClusterJoin,
joinClusterErr: someErr,
expectErr: true,
expectedState: state.Failed,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
logger := zaptest.NewLogger(t)
core := &fakeCore{state: tc.state, joinClusterErr: tc.joinClusterErr}
dialer := testdialer.NewBufconnDialer()
api := New(logger, core, dialer, nil, nil, nil)
vserver := grpc.NewServer()
vapi := &stubVPNAPI{
joinArgs: kubeadm.BootstrapTokenDiscovery{
APIServerEndpoint: "endp",
Token: "token",
CACertHashes: []string{"dis"},
},
getJoinArgsErr: tc.getJoinArgsErr,
}
vpnproto.RegisterAPIServer(vserver, vapi)
go vserver.Serve(dialer.GetListener(net.JoinHostPort(core.GetCoordinatorVPNIP(), vpnAPIPort)))
defer vserver.GracefulStop()
_, err := api.JoinCluster(context.Background(), &pubproto.JoinClusterRequest{})
assert.Equal(tc.expectedState, core.state)
if tc.expectErr {
assert.Error(err)
return
}
require.NoError(err)
assert.Equal([]kubeadm.BootstrapTokenDiscovery{vapi.joinArgs}, core.joinArgs)
})
}
}
type stubVPNAPI struct {
peers []peer.Peer
getUpdateErr error
joinArgs kubeadm.BootstrapTokenDiscovery
getJoinArgsErr error
vpnproto.UnimplementedAPIServer
}
func (a *stubVPNAPI) GetUpdate(ctx context.Context, in *vpnproto.GetUpdateRequest) (*vpnproto.GetUpdateResponse, error) {
return &vpnproto.GetUpdateResponse{ResourceVersion: 1, Peers: peer.ToVPNProto(a.peers)}, a.getUpdateErr
}
func (a *stubVPNAPI) GetK8SJoinArgs(ctx context.Context, in *vpnproto.GetK8SJoinArgsRequest) (*vpnproto.GetK8SJoinArgsResponse, error) {
return &vpnproto.GetK8SJoinArgsResponse{
ApiServerEndpoint: a.joinArgs.APIServerEndpoint,
Token: a.joinArgs.Token,
DiscoveryTokenCaCertHash: a.joinArgs.CACertHashes[0],
}, a.getJoinArgsErr
}


@@ -0,0 +1,114 @@
// Package pubapi implements the API that a peer exposes publicly.
package pubapi
import (
"context"
"net"
"sync"
"time"
"github.com/edgelesssys/constellation/coordinator/atls"
"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/insecure"
)
const (
deadlineDuration = time.Minute
endpointAVPNPort = "9000"
vpnAPIPort = "9027"
updateInterval = 10 * time.Second
)
// API implements the public API that a peer exposes.
type API struct {
mut sync.Mutex
logger *zap.Logger
core Core
dialer Dialer
vpnAPIServer VPNAPIServer
validator atls.Validator
getPublicIPAddr GetIPAddrFunc
stopUpdate chan struct{}
wgClose sync.WaitGroup
resourceVersion int
pubproto.UnimplementedAPIServer
}
// New creates a new API.
func New(logger *zap.Logger, core Core, dialer Dialer, vpnAPIServer VPNAPIServer, validator atls.Validator, getPublicIPAddr GetIPAddrFunc) *API {
return &API{
logger: logger,
core: core,
dialer: dialer,
vpnAPIServer: vpnAPIServer,
validator: validator,
getPublicIPAddr: getPublicIPAddr,
stopUpdate: make(chan struct{}, 1),
}
}
// GetState is the RPC call to get the peer's state.
func (a *API) GetState(ctx context.Context, in *pubproto.GetStateRequest) (*pubproto.GetStateResponse, error) {
return &pubproto.GetStateResponse{State: uint32(a.core.GetState())}, nil
}
// Close closes the API.
func (a *API) Close() {
a.stopUpdate <- struct{}{}
if a.vpnAPIServer != nil {
a.vpnAPIServer.Close()
}
a.wgClose.Wait()
}
func (a *API) dial(ctx context.Context, target string) (*grpc.ClientConn, error) {
tlsConfig, err := atls.CreateAttestationClientTLSConfig([]atls.Validator{a.validator})
if err != nil {
return nil, err
}
return grpc.DialContext(ctx, target,
a.grpcWithDialer(),
grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)),
)
}
func (a *API) dialInsecure(ctx context.Context, target string) (*grpc.ClientConn, error) {
return grpc.DialContext(ctx, target,
a.grpcWithDialer(),
grpc.WithTransportCredentials(insecure.NewCredentials()),
)
}
func (a *API) dialNoVerify(ctx context.Context, target string) (*grpc.ClientConn, error) {
tlsConfig, err := atls.CreateUnverifiedClientTLSConfig()
if err != nil {
return nil, err
}
return grpc.DialContext(ctx, target,
a.grpcWithDialer(),
grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)),
)
}
func (a *API) grpcWithDialer() grpc.DialOption {
return grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
return a.dialer.DialContext(ctx, "tcp", addr)
})
}
type Dialer interface {
DialContext(ctx context.Context, network, address string) (net.Conn, error)
}
type VPNAPIServer interface {
Listen(endpoint string) error
Serve() error
Close()
}
type GetIPAddrFunc func() (string, error)
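
The tests earlier in this commit show how a pubproto server is wired up with attestation-based TLS (stubNode.newServer). A hypothetical production-side sketch of hosting this API, not part of this commit and assuming the atls package exposes an Issuer counterpart to the Validator used above, could look like this:

package main // hypothetical wiring sketch, not part of this commit

import (
	"net"

	"github.com/edgelesssys/constellation/coordinator/atls"
	"github.com/edgelesssys/constellation/coordinator/pubapi"
	"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

// servePubAPI registers a pubapi.API on a gRPC server that presents an
// attestation-based TLS certificate, mirroring stubNode.newServer in the tests.
// api and issuer are assumed to be constructed elsewhere (see pubapi.New);
// atls.Issuer is an assumption about the atls package's interface.
func servePubAPI(api *pubapi.API, issuer atls.Issuer, endpoint string) error {
	tlsConfig, err := atls.CreateAttestationServerTLSConfig(issuer)
	if err != nil {
		return err
	}
	server := grpc.NewServer(grpc.Creds(credentials.NewTLS(tlsConfig)))
	pubproto.RegisterAPIServer(server, api)

	lis, err := net.Listen("tcp", endpoint)
	if err != nil {
		return err
	}
	return server.Serve(lis)
}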


@@ -0,0 +1,16 @@
package pubapi
import (
"testing"
"go.uber.org/goleak"
)
func TestMain(m *testing.M) {
goleak.VerifyTestMain(m,
// https://github.com/census-instrumentation/opencensus-go/issues/1262
goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"),
// https://github.com/kubernetes/klog/issues/282, https://github.com/kubernetes/klog/issues/188
goleak.IgnoreTopFunction("k8s.io/klog/v2.(*loggingT).flushDaemon"),
)
}

File diff suppressed because it is too large.


@@ -0,0 +1,90 @@
syntax = "proto3";
package pubapi;
option go_package = "github.com/edgelesssys/constellation/coordinator/pubapi/pubproto";
service API {
rpc GetState(GetStateRequest) returns (GetStateResponse);
rpc ActivateAsCoordinator(ActivateAsCoordinatorRequest) returns (stream ActivateAsCoordinatorResponse);
rpc ActivateAsNode(ActivateAsNodeRequest) returns (ActivateAsNodeResponse);
rpc ActivateAdditionalNodes(ActivateAdditionalNodesRequest) returns (stream ActivateAdditionalNodesResponse);
rpc JoinCluster(JoinClusterRequest) returns (JoinClusterResponse);
rpc TriggerNodeUpdate(TriggerNodeUpdateRequest) returns (TriggerNodeUpdateResponse);
}
message GetStateRequest {
}
message GetStateResponse {
uint32 state = 1;
}
message ActivateAsCoordinatorRequest {
bytes admin_vpn_pub_key = 1;
repeated string node_public_endpoints = 2;
repeated string autoscaling_node_groups = 3;
bytes master_secret = 4;
string kms_uri = 5;
string storage_uri = 6;
string key_encryption_key_id = 7;
bool use_existing_kek = 8;
string cloud_service_account_uri = 9;
}
message ActivateAsCoordinatorResponse {
oneof content {
AdminConfig admin_config = 1;
Log log = 2;
}
}
message ActivateAsNodeRequest {
string node_vpn_ip = 1;
repeated Peer peers = 2;
bytes owner_id = 3;
bytes cluster_id = 4;
}
message ActivateAsNodeResponse {
bytes node_vpn_pub_key = 1;
}
message ActivateAdditionalNodesRequest {
repeated string node_public_endpoints = 1;
}
message ActivateAdditionalNodesResponse {
Log log = 1;
}
message JoinClusterRequest {
}
message JoinClusterResponse {
}
message TriggerNodeUpdateRequest {
}
message TriggerNodeUpdateResponse {
}
message AdminConfig {
string admin_vpn_ip = 1;
bytes coordinator_vpn_pub_key = 2;
bytes kubeconfig = 3;
bytes owner_id = 4;
bytes cluster_id = 5;
}
message Log {
string message = 1;
}
message Peer {
string public_endpoint = 1;
string vpn_ip = 2;
bytes vpn_pub_key = 3;
uint32 role = 4;
}
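
The Go bindings in this commit (the suppressed pubapi.pb.go and the pubapi_grpc.pb.go below) are generated from this definition. A plausible regeneration directive, assuming protoc with the protoc-gen-go and protoc-gen-go-grpc plugins installed; the exact invocation is not part of this commit:

// Hypothetical regeneration directive; assumes protoc, protoc-gen-go, and
// protoc-gen-go-grpc are on PATH. Not taken from this commit.
//go:generate protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative pubapi.proto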


@@ -0,0 +1,336 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
package pubproto
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
// APIClient is the client API for API service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type APIClient interface {
GetState(ctx context.Context, in *GetStateRequest, opts ...grpc.CallOption) (*GetStateResponse, error)
ActivateAsCoordinator(ctx context.Context, in *ActivateAsCoordinatorRequest, opts ...grpc.CallOption) (API_ActivateAsCoordinatorClient, error)
ActivateAsNode(ctx context.Context, in *ActivateAsNodeRequest, opts ...grpc.CallOption) (*ActivateAsNodeResponse, error)
ActivateAdditionalNodes(ctx context.Context, in *ActivateAdditionalNodesRequest, opts ...grpc.CallOption) (API_ActivateAdditionalNodesClient, error)
JoinCluster(ctx context.Context, in *JoinClusterRequest, opts ...grpc.CallOption) (*JoinClusterResponse, error)
TriggerNodeUpdate(ctx context.Context, in *TriggerNodeUpdateRequest, opts ...grpc.CallOption) (*TriggerNodeUpdateResponse, error)
}
type aPIClient struct {
cc grpc.ClientConnInterface
}
func NewAPIClient(cc grpc.ClientConnInterface) APIClient {
return &aPIClient{cc}
}
func (c *aPIClient) GetState(ctx context.Context, in *GetStateRequest, opts ...grpc.CallOption) (*GetStateResponse, error) {
out := new(GetStateResponse)
err := c.cc.Invoke(ctx, "/pubapi.API/GetState", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *aPIClient) ActivateAsCoordinator(ctx context.Context, in *ActivateAsCoordinatorRequest, opts ...grpc.CallOption) (API_ActivateAsCoordinatorClient, error) {
stream, err := c.cc.NewStream(ctx, &API_ServiceDesc.Streams[0], "/pubapi.API/ActivateAsCoordinator", opts...)
if err != nil {
return nil, err
}
x := &aPIActivateAsCoordinatorClient{stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
if err := x.ClientStream.CloseSend(); err != nil {
return nil, err
}
return x, nil
}
type API_ActivateAsCoordinatorClient interface {
Recv() (*ActivateAsCoordinatorResponse, error)
grpc.ClientStream
}
type aPIActivateAsCoordinatorClient struct {
grpc.ClientStream
}
func (x *aPIActivateAsCoordinatorClient) Recv() (*ActivateAsCoordinatorResponse, error) {
m := new(ActivateAsCoordinatorResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func (c *aPIClient) ActivateAsNode(ctx context.Context, in *ActivateAsNodeRequest, opts ...grpc.CallOption) (*ActivateAsNodeResponse, error) {
out := new(ActivateAsNodeResponse)
err := c.cc.Invoke(ctx, "/pubapi.API/ActivateAsNode", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *aPIClient) ActivateAdditionalNodes(ctx context.Context, in *ActivateAdditionalNodesRequest, opts ...grpc.CallOption) (API_ActivateAdditionalNodesClient, error) {
stream, err := c.cc.NewStream(ctx, &API_ServiceDesc.Streams[1], "/pubapi.API/ActivateAdditionalNodes", opts...)
if err != nil {
return nil, err
}
x := &aPIActivateAdditionalNodesClient{stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
if err := x.ClientStream.CloseSend(); err != nil {
return nil, err
}
return x, nil
}
type API_ActivateAdditionalNodesClient interface {
Recv() (*ActivateAdditionalNodesResponse, error)
grpc.ClientStream
}
type aPIActivateAdditionalNodesClient struct {
grpc.ClientStream
}
func (x *aPIActivateAdditionalNodesClient) Recv() (*ActivateAdditionalNodesResponse, error) {
m := new(ActivateAdditionalNodesResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func (c *aPIClient) JoinCluster(ctx context.Context, in *JoinClusterRequest, opts ...grpc.CallOption) (*JoinClusterResponse, error) {
out := new(JoinClusterResponse)
err := c.cc.Invoke(ctx, "/pubapi.API/JoinCluster", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *aPIClient) TriggerNodeUpdate(ctx context.Context, in *TriggerNodeUpdateRequest, opts ...grpc.CallOption) (*TriggerNodeUpdateResponse, error) {
out := new(TriggerNodeUpdateResponse)
err := c.cc.Invoke(ctx, "/pubapi.API/TriggerNodeUpdate", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// APIServer is the server API for API service.
// All implementations must embed UnimplementedAPIServer
// for forward compatibility
type APIServer interface {
GetState(context.Context, *GetStateRequest) (*GetStateResponse, error)
ActivateAsCoordinator(*ActivateAsCoordinatorRequest, API_ActivateAsCoordinatorServer) error
ActivateAsNode(context.Context, *ActivateAsNodeRequest) (*ActivateAsNodeResponse, error)
ActivateAdditionalNodes(*ActivateAdditionalNodesRequest, API_ActivateAdditionalNodesServer) error
JoinCluster(context.Context, *JoinClusterRequest) (*JoinClusterResponse, error)
TriggerNodeUpdate(context.Context, *TriggerNodeUpdateRequest) (*TriggerNodeUpdateResponse, error)
mustEmbedUnimplementedAPIServer()
}
// UnimplementedAPIServer must be embedded to have forward compatible implementations.
type UnimplementedAPIServer struct {
}
func (UnimplementedAPIServer) GetState(context.Context, *GetStateRequest) (*GetStateResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetState not implemented")
}
func (UnimplementedAPIServer) ActivateAsCoordinator(*ActivateAsCoordinatorRequest, API_ActivateAsCoordinatorServer) error {
return status.Errorf(codes.Unimplemented, "method ActivateAsCoordinator not implemented")
}
func (UnimplementedAPIServer) ActivateAsNode(context.Context, *ActivateAsNodeRequest) (*ActivateAsNodeResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ActivateAsNode not implemented")
}
func (UnimplementedAPIServer) ActivateAdditionalNodes(*ActivateAdditionalNodesRequest, API_ActivateAdditionalNodesServer) error {
return status.Errorf(codes.Unimplemented, "method ActivateAdditionalNodes not implemented")
}
func (UnimplementedAPIServer) JoinCluster(context.Context, *JoinClusterRequest) (*JoinClusterResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method JoinCluster not implemented")
}
func (UnimplementedAPIServer) TriggerNodeUpdate(context.Context, *TriggerNodeUpdateRequest) (*TriggerNodeUpdateResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method TriggerNodeUpdate not implemented")
}
func (UnimplementedAPIServer) mustEmbedUnimplementedAPIServer() {}
// UnsafeAPIServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to APIServer will
// result in compilation errors.
type UnsafeAPIServer interface {
mustEmbedUnimplementedAPIServer()
}
func RegisterAPIServer(s grpc.ServiceRegistrar, srv APIServer) {
s.RegisterService(&API_ServiceDesc, srv)
}
func _API_GetState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetStateRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(APIServer).GetState(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pubapi.API/GetState",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(APIServer).GetState(ctx, req.(*GetStateRequest))
}
return interceptor(ctx, in, info, handler)
}
func _API_ActivateAsCoordinator_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(ActivateAsCoordinatorRequest)
if err := stream.RecvMsg(m); err != nil {
return err
}
return srv.(APIServer).ActivateAsCoordinator(m, &aPIActivateAsCoordinatorServer{stream})
}
type API_ActivateAsCoordinatorServer interface {
Send(*ActivateAsCoordinatorResponse) error
grpc.ServerStream
}
type aPIActivateAsCoordinatorServer struct {
grpc.ServerStream
}
func (x *aPIActivateAsCoordinatorServer) Send(m *ActivateAsCoordinatorResponse) error {
return x.ServerStream.SendMsg(m)
}
func _API_ActivateAsNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ActivateAsNodeRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(APIServer).ActivateAsNode(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pubapi.API/ActivateAsNode",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(APIServer).ActivateAsNode(ctx, req.(*ActivateAsNodeRequest))
}
return interceptor(ctx, in, info, handler)
}
func _API_ActivateAdditionalNodes_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(ActivateAdditionalNodesRequest)
if err := stream.RecvMsg(m); err != nil {
return err
}
return srv.(APIServer).ActivateAdditionalNodes(m, &aPIActivateAdditionalNodesServer{stream})
}
type API_ActivateAdditionalNodesServer interface {
Send(*ActivateAdditionalNodesResponse) error
grpc.ServerStream
}
type aPIActivateAdditionalNodesServer struct {
grpc.ServerStream
}
func (x *aPIActivateAdditionalNodesServer) Send(m *ActivateAdditionalNodesResponse) error {
return x.ServerStream.SendMsg(m)
}
func _API_JoinCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(JoinClusterRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(APIServer).JoinCluster(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pubapi.API/JoinCluster",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(APIServer).JoinCluster(ctx, req.(*JoinClusterRequest))
}
return interceptor(ctx, in, info, handler)
}
func _API_TriggerNodeUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(TriggerNodeUpdateRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(APIServer).TriggerNodeUpdate(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pubapi.API/TriggerNodeUpdate",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(APIServer).TriggerNodeUpdate(ctx, req.(*TriggerNodeUpdateRequest))
}
return interceptor(ctx, in, info, handler)
}
// API_ServiceDesc is the grpc.ServiceDesc for API service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var API_ServiceDesc = grpc.ServiceDesc{
ServiceName: "pubapi.API",
HandlerType: (*APIServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "GetState",
Handler: _API_GetState_Handler,
},
{
MethodName: "ActivateAsNode",
Handler: _API_ActivateAsNode_Handler,
},
{
MethodName: "JoinCluster",
Handler: _API_JoinCluster_Handler,
},
{
MethodName: "TriggerNodeUpdate",
Handler: _API_TriggerNodeUpdate_Handler,
},
},
Streams: []grpc.StreamDesc{
{
StreamName: "ActivateAsCoordinator",
Handler: _API_ActivateAsCoordinator_Handler,
ServerStreams: true,
},
{
StreamName: "ActivateAdditionalNodes",
Handler: _API_ActivateAdditionalNodes_Handler,
ServerStreams: true,
},
},
Metadata: "pubapi.proto",
}