Mirror of https://github.com/edgelesssys/constellation.git

commit f0e35a43d4 (parent 55a1aa783f)

    peer: save PublicIP instead of publicEndpoint / add multi-coord gRPCs
@@ -162,7 +162,7 @@ func activate(ctx context.Context, cmd *cobra.Command, client protoClient, input
 		return activationResult{}, err
 	}
 
-	respCl, err := client.Activate(ctx, input.pubKey, input.masterSecret, ipsToEndpoints(input.nodePrivIPs, *config.CoordinatorPort), input.autoscalingNodeGroups, input.cloudServiceAccountURI)
+	respCl, err := client.Activate(ctx, input.pubKey, input.masterSecret, input.nodePrivIPs, input.autoscalingNodeGroups, input.cloudServiceAccountURI)
	if err != nil {
 		return activationResult{}, err
 	}
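For reference, the removed ipsToEndpoints call on the old line above presumably did nothing more than join each private IP with the configured coordinator port. A minimal, self-contained sketch of such a helper; the body is an assumption, only the name and call shape come from the diff:

package main

import (
	"fmt"
	"net"
)

// ipsToEndpoints joins every IP with the given port (assumed behavior).
func ipsToEndpoints(ips []string, port string) []string {
	endpoints := make([]string, 0, len(ips))
	for _, ip := range ips {
		// net.JoinHostPort also brackets IPv6 addresses correctly.
		endpoints = append(endpoints, net.JoinHostPort(ip, port))
	}
	return endpoints
}

func main() {
	fmt.Println(ipsToEndpoints([]string{"10.118.0.1", "fd00::1"}, "9000"))
	// prints: [10.118.0.1:9000 [fd00::1]:9000]
}

After this commit the CLI no longer needs such a helper here, since the coordinator derives endpoints from the bare IPs itself.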
@@ -77,17 +77,17 @@ func (c *Client) Close() error {
 }
 
 // Activate activates the Constellation coordinator via a grpc call.
-// The handed endpoints must be the private endpoints of running AWS or GCP instances,
+// The handed IP addresses must be the private IP addresses of running AWS or GCP instances,
 // and the userPublicKey is the VPN key of the users WireGuard interface.
-func (c *Client) Activate(ctx context.Context, userPublicKey, masterSecret []byte, endpoints, autoscalingNodeGroups []string, cloudServiceAccountURI string) (ActivationResponseClient, error) {
+func (c *Client) Activate(ctx context.Context, userPublicKey, masterSecret []byte, ips, autoscalingNodeGroups []string, cloudServiceAccountURI string) (ActivationResponseClient, error) {
 	if c.avpn == nil {
 		return nil, errors.New("client is not connected")
 	}
 	if len(userPublicKey) == 0 {
 		return nil, errors.New("parameter userPublicKey is empty")
 	}
-	if len(endpoints) == 0 {
-		return nil, errors.New("parameter endpoints is empty")
+	if len(ips) == 0 {
+		return nil, errors.New("parameter ips is empty")
 	}
 
 	pubKey, err := wgtypes.ParseKey(string(userPublicKey))
@@ -97,7 +97,7 @@ func (c *Client) Activate(ctx context.Context, userPublicKey, masterSecret []byt
 
 	avpnRequest := &pubproto.ActivateAsCoordinatorRequest{
 		AdminVpnPubKey: pubKey[:],
-		NodePublicEndpoints: endpoints,
+		NodePublicIps: ips,
 		AutoscalingNodeGroups: autoscalingNodeGroups,
 		MasterSecret: masterSecret,
 		KmsUri: kms.ClusterKMSURI,
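A hedged usage sketch of the new Activate signature; only the signature and field names come from the diff above, the values and surrounding setup are illustrative:

	// ips are bare private IPs now; the coordinator port is resolved server-side.
	ips := []string{"10.118.0.11", "10.118.0.12"}
	respClient, err := client.Activate(ctx, userPublicKey, masterSecret, ips, nil, "serviceaccount://test")
	if err != nil {
		// handle error
	}
	_ = respClient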
@@ -72,42 +72,42 @@ func TestActivate(t *testing.T) {
 	testCases := map[string]struct {
 		avpn *stubAVPNClient
 		userPublicKey string
-		endpoints []string
+		ips []string
 		errExpected bool
 	}{
 		"normal activation": {
 			avpn: &stubAVPNClient{},
 			userPublicKey: testKey,
-			endpoints: []string{"192.0.2.1", "192.0.2.1", "192.0.2.1"},
+			ips: []string{"192.0.2.1", "192.0.2.1", "192.0.2.1"},
 			errExpected: false,
 		},
 		"client without avpn": {
 			userPublicKey: testKey,
-			endpoints: []string{"192.0.2.1", "192.0.2.1", "192.0.2.1"},
+			ips: []string{"192.0.2.1", "192.0.2.1", "192.0.2.1"},
 			errExpected: true,
 		},
 		"empty public key parameter": {
 			avpn: &stubAVPNClient{},
 			userPublicKey: "",
-			endpoints: []string{"192.0.2.1", "192.0.2.1", "192.0.2.1"},
+			ips: []string{"192.0.2.1", "192.0.2.1", "192.0.2.1"},
 			errExpected: true,
 		},
 		"invalid public key parameter": {
 			avpn: &stubAVPNClient{},
 			userPublicKey: "invalid Key",
-			endpoints: []string{"192.0.2.1", "192.0.2.1", "192.0.2.1"},
+			ips: []string{"192.0.2.1", "192.0.2.1", "192.0.2.1"},
 			errExpected: true,
 		},
 		"empty ips parameter": {
 			avpn: &stubAVPNClient{},
 			userPublicKey: testKey,
-			endpoints: []string{},
+			ips: []string{},
 			errExpected: true,
 		},
 		"fail ActivateAsCoordinator": {
 			avpn: &stubAVPNClient{activateAsCoordinatorErr: someErr},
 			userPublicKey: testKey,
-			endpoints: []string{"192.0.2.1", "192.0.2.1", "192.0.2.1"},
+			ips: []string{"192.0.2.1", "192.0.2.1", "192.0.2.1"},
 			errExpected: true,
 		},
 	}
@@ -120,13 +120,13 @@ func TestActivate(t *testing.T) {
 			if tc.avpn != nil {
 				client.avpn = tc.avpn
 			}
-			_, err := client.Activate(context.Background(), []byte(tc.userPublicKey), []byte("Constellation"), tc.endpoints, nil, "serviceaccount://test")
+			_, err := client.Activate(context.Background(), []byte(tc.userPublicKey), []byte("Constellation"), tc.ips, nil, "serviceaccount://test")
 			if tc.errExpected {
 				assert.Error(err)
 			} else {
 				assert.NoError(err)
 				assert.Equal("32bytesWireGuardKeyForTheTesting", string(tc.avpn.activateAsCoordinatorReqKey))
-				assert.Equal(tc.endpoints, tc.avpn.activateAsCoordinatorReqEndpoints)
+				assert.Equal(tc.ips, tc.avpn.activateAsCoordinatorReqIPs)
 				assert.Equal("Constellation", string(tc.avpn.activateAsCoordinatorMasterSecret))
 				assert.Equal("serviceaccount://test", tc.avpn.activateCloudServiceAccountURI)
 			}
@@ -135,13 +135,13 @@ func TestActivate(t *testing.T) {
 }
 
 type stubAVPNClient struct {
-	activateAsCoordinatorErr error
-	activateAdditionalNodesErr error
-	activateAsCoordinatorReqKey []byte
-	activateAsCoordinatorReqEndpoints []string
-	activateAsCoordinatorMasterSecret []byte
-	activateAdditionalNodesReqEndpoints []string
-	activateCloudServiceAccountURI string
+	activateAsCoordinatorErr error
+	activateAdditionalNodesErr error
+	activateAsCoordinatorReqKey []byte
+	activateAsCoordinatorReqIPs []string
+	activateAsCoordinatorMasterSecret []byte
+	activateAdditionalNodesReqIPs []string
+	activateCloudServiceAccountURI string
 	pubproto.APIClient
 }
 
@@ -149,7 +149,7 @@ func (s *stubAVPNClient) ActivateAsCoordinator(ctx context.Context, in *pubproto
 	opts ...grpc.CallOption,
 ) (pubproto.API_ActivateAsCoordinatorClient, error) {
 	s.activateAsCoordinatorReqKey = in.AdminVpnPubKey
-	s.activateAsCoordinatorReqEndpoints = in.NodePublicEndpoints
+	s.activateAsCoordinatorReqIPs = in.NodePublicIps
 	s.activateAsCoordinatorMasterSecret = in.MasterSecret
 	s.activateCloudServiceAccountURI = in.CloudServiceAccountUri
 	return dummyAVPNActivateAsCoordinatorClient{}, s.activateAsCoordinatorErr
@@ -158,6 +158,6 @@ func (s *stubAVPNClient) ActivateAsCoordinator(ctx context.Context, in *pubproto
 func (s *stubAVPNClient) ActivateAdditionalNodes(ctx context.Context, in *pubproto.ActivateAdditionalNodesRequest,
 	opts ...grpc.CallOption,
 ) (pubproto.API_ActivateAdditionalNodesClient, error) {
-	s.activateAdditionalNodesReqEndpoints = in.NodePublicEndpoints
+	s.activateAdditionalNodesReqIPs = in.NodePublicIps
 	return dummyAVPNActivateAdditionalNodesClient{}, s.activateAdditionalNodesErr
 }
@@ -89,18 +89,17 @@ func run(validator core.QuoteValidator, issuer core.QuoteIssuer, vpn core.VPN, o
 
 	if !nodeActivated {
 		zapLoggerStartupJoin := zapLoggerCore.Named("startup-join")
-		if err := tryJoinClusterOnStartup(getPublicIPAddr, metadata, bindPort, zapLoggerStartupJoin); err != nil {
+		if err := tryJoinClusterOnStartup(getPublicIPAddr, metadata, zapLoggerStartupJoin); err != nil {
 			zapLoggerStartupJoin.Info("joining existing cluster on startup failed. Waiting for connection.", zap.Error(err))
 		}
 	}
 }
 
-func tryJoinClusterOnStartup(getPublicIPAddr func() (string, error), metadata core.ProviderMetadata, bindPort string, logger *zap.Logger) error {
+func tryJoinClusterOnStartup(getPublicIPAddr func() (string, error), metadata core.ProviderMetadata, logger *zap.Logger) error {
 	nodePublicIP, err := getPublicIPAddr()
 	if err != nil {
 		return fmt.Errorf("failed to retrieve own public ip: %w", err)
 	}
-	nodeEndpoint := net.JoinHostPort(nodePublicIP, bindPort)
 	if !metadata.Supported() {
 		logger.Info("Metadata API not implemented for cloud provider")
 		return errors.New("metadata API not implemented")
@@ -128,7 +127,7 @@ func tryJoinClusterOnStartup(getPublicIPAddr func() (string, error), metadata co
 	defer conn.Close()
 	client := pubproto.NewAPIClient(conn)
 	logger.Info("Activating as node on startup")
-	_, err = client.ActivateAdditionalNodes(context.Background(), &pubproto.ActivateAdditionalNodesRequest{NodePublicEndpoints: []string{nodeEndpoint}})
+	_, err = client.ActivateAdditionalNodes(context.Background(), &pubproto.ActivateAdditionalNodesRequest{NodePublicIps: []string{nodePublicIP}})
 	return err
 }
 
@@ -46,27 +46,28 @@ func TestCoordinator(t *testing.T) {
 	assert := assert.New(t)
 	require := require.New(t)
 
-	nodeEndpoints := []string{"addr-1:10", "addr-2:20", "addr-3:30"}
-	coordinatorEndpoint := "addr-0:15"
+	nodeIPs := []string{"192.0.2.11", "192.0.2.12", "192.0.2.13"}
+	coordinatorIP := "192.0.2.1"
+	bindPort := "9000"
 	logger := zaptest.NewLogger(t)
 	dialer := testdialer.NewBufconnDialer()
 	netw := newNetwork()
 
 	// spawn 4 peers: 1 designated coordinator and 3 nodes
-	coordServer, coordPAPI, _ := spawnPeer(require, logger.Named("coord"), dialer, netw, coordinatorEndpoint)
+	coordServer, coordPAPI, _ := spawnPeer(require, logger.Named("coord"), dialer, netw, net.JoinHostPort(coordinatorIP, bindPort))
 	defer coordPAPI.Close()
 	defer coordServer.GracefulStop()
-	nodeServer1, nodePAPI1, nodeVPN1 := spawnPeer(require, logger.Named("node1"), dialer, netw, nodeEndpoints[0])
+	nodeServer1, nodePAPI1, nodeVPN1 := spawnPeer(require, logger.Named("node1"), dialer, netw, net.JoinHostPort(nodeIPs[0], bindPort))
 	defer nodePAPI1.Close()
 	defer nodeServer1.GracefulStop()
-	nodeServer2, nodePAPI2, nodeVPN2 := spawnPeer(require, logger.Named("node2"), dialer, netw, nodeEndpoints[1])
+	nodeServer2, nodePAPI2, nodeVPN2 := spawnPeer(require, logger.Named("node2"), dialer, netw, net.JoinHostPort(nodeIPs[1], bindPort))
 	defer nodePAPI2.Close()
 	defer nodeServer2.GracefulStop()
-	nodeServer3, nodePAPI3, nodeVPN3 := spawnPeer(require, logger.Named("node3"), dialer, netw, nodeEndpoints[2])
+	nodeServer3, nodePAPI3, nodeVPN3 := spawnPeer(require, logger.Named("node3"), dialer, netw, net.JoinHostPort(nodeIPs[2], bindPort))
 	defer nodePAPI3.Close()
 	defer nodeServer3.GracefulStop()
 
-	require.NoError(activateCoordinator(require, dialer, coordinatorEndpoint, nodeEndpoints))
+	require.NoError(activateCoordinator(require, dialer, coordinatorIP, bindPort, nodeIPs))
 
 	// send something from node 1 to node 2
 
@@ -89,20 +90,21 @@ func TestConcurrent(t *testing.T) {
 	assert := assert.New(t)
 	require := require.New(t)
 
-	nodeEndpoints := []string{"addr-1:10", "addr-2:20"}
-	coordinatorEndpoint := "addr-0:15"
+	nodeIPs := []string{"192.0.2.11", "192.0.2.12", "192.0.2.13"}
+	coordinatorIP := "192.0.2.1"
+	bindPort := "9000"
 	logger := zaptest.NewLogger(t)
 	dialer := testdialer.NewBufconnDialer()
 	netw := newNetwork()
 
 	// spawn peers
-	coordServer, coordPAPI, _ := spawnPeer(require, logger.Named("coord"), dialer, netw, coordinatorEndpoint)
+	coordServer, coordPAPI, _ := spawnPeer(require, logger.Named("coord"), dialer, netw, net.JoinHostPort(coordinatorIP, bindPort))
 	defer coordPAPI.Close()
 	defer coordServer.GracefulStop()
-	nodeServer1, nodePAPI1, _ := spawnPeer(require, logger.Named("node1"), dialer, netw, nodeEndpoints[0])
+	nodeServer1, nodePAPI1, _ := spawnPeer(require, logger.Named("node1"), dialer, netw, net.JoinHostPort(nodeIPs[0], bindPort))
 	defer nodePAPI1.Close()
 	defer nodeServer1.GracefulStop()
-	nodeServer2, nodePAPI2, _ := spawnPeer(require, logger.Named("node2"), dialer, netw, nodeEndpoints[1])
+	nodeServer2, nodePAPI2, _ := spawnPeer(require, logger.Named("node2"), dialer, netw, net.JoinHostPort(nodeIPs[1], bindPort))
 	defer nodePAPI2.Close()
 	defer nodeServer2.GracefulStop()
 
@@ -110,7 +112,7 @@ func TestConcurrent(t *testing.T) {
 
 	actCoord := func() {
 		defer wg.Done()
-		_ = activateCoordinator(require, dialer, coordinatorEndpoint, nodeEndpoints)
+		_ = activateCoordinator(require, dialer, coordinatorIP, bindPort, nodeIPs)
 	}
 
 	actNode := func(papi *pubapi.API) {
@@ -214,18 +216,18 @@ func spawnPeer(require *require.Assertions, logger *zap.Logger, dialer *testdial
 	return server, papi, vpn
 }
 
-func activateCoordinator(require *require.Assertions, dialer pubapi.Dialer, coordinatorEndpoint string, nodeEndpoints []string) error {
+func activateCoordinator(require *require.Assertions, dialer pubapi.Dialer, coordinatorIP, bindPort string, nodeIPs []string) error {
 	ctx := context.Background()
-	conn, err := dialGRPC(ctx, dialer, coordinatorEndpoint)
+	conn, err := dialGRPC(ctx, dialer, net.JoinHostPort(coordinatorIP, bindPort))
 	require.NoError(err)
 	defer conn.Close()
 
 	client := pubproto.NewAPIClient(conn)
 	stream, err := client.ActivateAsCoordinator(ctx, &pubproto.ActivateAsCoordinatorRequest{
-		NodePublicEndpoints: nodeEndpoints,
-		MasterSecret: []byte("Constellation"),
-		KmsUri: kms.ClusterKMSURI,
-		StorageUri: kms.NoStoreURI,
+		NodePublicIps: nodeIPs,
+		MasterSecret: []byte("Constellation"),
+		KmsUri: kms.ClusterKMSURI,
+		StorageUri: kms.NoStoreURI,
 	})
 	require.NoError(err)
 
@@ -350,11 +352,7 @@ func (v *fakeVPN) RemovePeer(pubKey []byte) error {
 
 func (v *fakeVPN) UpdatePeers(peers []peer.Peer) error {
 	for _, peer := range peers {
-		peerIP, _, err := net.SplitHostPort(peer.PublicEndpoint)
-		if err != nil {
-			return err
-		}
-		if err := v.AddPeer(peer.VPNPubKey, peerIP, peer.VPNIP); err != nil {
+		if err := v.AddPeer(peer.VPNPubKey, peer.PublicIP, peer.VPNIP); err != nil {
 			return err
 		}
 	}
@@ -100,21 +100,9 @@ func (c *Core) SetVPNIP(ip string) error {
 	return c.vpn.SetInterfaceIP(ip)
 }
 
-// GetCoordinatorVPNIP returns the VPN IP designated for the Coordinator.
-func (*Core) GetCoordinatorVPNIP() string {
-	return coordinatorVPNIP.String()
-}
-
-// AddAdmin adds an admin to the VPN.
-func (c *Core) AddAdmin(pubKey []byte) (string, error) {
-	vpnIP, err := c.GetNextNodeIP()
-	if err != nil {
-		return "", err
-	}
-	if err := c.vpn.AddPeer(pubKey, "", vpnIP); err != nil {
-		return "", err
-	}
-	return vpnIP, nil
+// GetVPNIP returns the cores VPN IP.
+func (c *Core) GetVPNIP() (string, error) {
+	return c.vpn.GetInterfaceIP()
 }
 
 // GetNextNodeIP gets the next free IP-Addr.
@@ -131,6 +119,20 @@ func (c *Core) GetNextNodeIP() (string, error) {
 	return ip.String(), tx.Commit()
 }
 
+// GetNextCoordinatorIP gets the next free IP-Addr.
+func (c *Core) GetNextCoordinatorIP() (string, error) {
+	tx, err := c.store.BeginTransaction()
+	if err != nil {
+		return "", err
+	}
+	txwrapper := storewrapper.StoreWrapper{Store: tx}
+	ip, err := txwrapper.PopNextFreeCoordinatorIP()
+	if err != nil {
+		return "", err
+	}
+	return ip.String(), tx.Commit()
+}
+
 // SwitchToPersistentStore creates a new store using the persistentStoreFactory and transfers the initial temporary store into it.
 func (c *Core) SwitchToPersistentStore() error {
 	newStore, err := c.persistentStoreFactory.New()
@@ -29,23 +29,6 @@ func TestMain(m *testing.M) {
 	)
 }
 
-func TestAddAdmin(t *testing.T) {
-	assert := assert.New(t)
-	require := require.New(t)
-
-	vpn := &stubVPN{}
-	core, err := NewCore(vpn, nil, nil, nil, nil, nil, zaptest.NewLogger(t), nil, nil, file.NewHandler(afero.NewMemMapFs()))
-	require.NoError(err)
-	require.NoError(core.InitializeStoreIPs())
-
-	pubKey := []byte{2, 3, 4}
-
-	vpnIP, err := core.AddAdmin(pubKey)
-	require.NoError(err)
-	assert.NotNil(net.ParseIP(vpnIP))
-	assert.Equal([]stubVPNPeer{{pubKey: pubKey, vpnIP: vpnIP}}, vpn.peers)
-}
-
 func TestGetNextNodeIP(t *testing.T) {
 	assert := assert.New(t)
 	require := require.New(t)
@@ -47,33 +47,34 @@ func TestLegacyActivateCoordinator(t *testing.T) {
 	require.NoError(err)
 	defer nodeAPI3.Close()
 
-	nodeEndpoints := []string{"addr-1:10", "addr-2:20", "addr-3:30"}
-	coordinatorEndpoint := "addr-0:15"
-
-	nodeServer1, err := spawnNode(nodeEndpoints[0], nodeAPI1, bufDialer)
+	nodeIPs := []string{"192.0.2.11", "192.0.2.12", "192.0.2.13"}
+	coordinatorIP := "192.0.2.1"
+	bindPort := "9000"
+	nodeServer1, err := spawnNode(net.JoinHostPort(nodeIPs[0], bindPort), nodeAPI1, bufDialer)
 	require.NoError(err)
 	defer nodeServer1.GracefulStop()
-	nodeServer2, err := spawnNode(nodeEndpoints[1], nodeAPI2, bufDialer)
+	nodeServer2, err := spawnNode(net.JoinHostPort(nodeIPs[1], bindPort), nodeAPI2, bufDialer)
 	require.NoError(err)
 	defer nodeServer2.GracefulStop()
-	nodeServer3, err := spawnNode(nodeEndpoints[2], nodeAPI3, bufDialer)
+	nodeServer3, err := spawnNode(net.JoinHostPort(nodeIPs[2], bindPort), nodeAPI3, bufDialer)
 	require.NoError(err)
 	defer nodeServer3.GracefulStop()
 
 	coordinatorCore, coordinatorAPI, err := newMockCoreWithDialer(bufDialer)
 	require.NoError(err)
+	require.NoError(coordinatorCore.SetVPNIP("10.118.0.1"))
 	defer coordinatorAPI.Close()
-	coordinatorServer, err := spawnNode(coordinatorEndpoint, coordinatorAPI, bufDialer)
+	coordinatorServer, err := spawnNode(net.JoinHostPort(coordinatorIP, bindPort), coordinatorAPI, bufDialer)
 	require.NoError(err)
 	defer coordinatorServer.GracefulStop()
 
 	// activate coordinator
 	activationReq := &pubproto.ActivateAsCoordinatorRequest{
-		AdminVpnPubKey: adminVPNKey,
-		NodePublicEndpoints: nodeEndpoints,
-		MasterSecret: []byte("Constellation"),
-		KmsUri: kms.ClusterKMSURI,
-		StorageUri: kms.NoStoreURI,
+		AdminVpnPubKey: adminVPNKey,
+		NodePublicIps: nodeIPs,
+		MasterSecret: []byte("Constellation"),
+		KmsUri: kms.ClusterKMSURI,
+		StorageUri: kms.NoStoreURI,
 	}
 	testActivationSvr := &stubAVPNActivateCoordinatorServer{}
 	assert.NoError(coordinatorAPI.ActivateAsCoordinator(activationReq, testActivationSvr))
@@ -100,9 +101,9 @@ func TestLegacyActivateCoordinator(t *testing.T) {
 	nodeResp, err := nodeAPI3.ActivateAsNode(context.TODO(), &pubproto.ActivateAsNodeRequest{
 		NodeVpnIp: "192.0.2.1:9004",
 		Peers: []*pubproto.Peer{{
-			VpnPubKey: coordinatorKey,
-			PublicEndpoint: coordinatorEndpoint,
-			VpnIp: "10.118.0.1",
+			VpnPubKey: coordinatorKey,
+			PublicIp: coordinatorIP,
+			VpnIp: "10.118.0.1",
 		}},
 		OwnerId: []byte("ownerID"),
 		ClusterId: []byte("clusterID"),
@@ -1,14 +1,13 @@
 package core
 
 import (
-	"net"
 
 	"github.com/edgelesssys/constellation/coordinator/peer"
 	"github.com/edgelesssys/constellation/coordinator/storewrapper"
 	"go.uber.org/zap"
 )
 
 // GetPeers returns the stored peers if the requested version differs from the stored version.
 // peers include all vpn devices namely Coordinators, Nodes and Admins.
 func (c *Core) GetPeers(resourceVersion int) (int, []peer.Peer, error) {
 	// Most often there's nothing to do, so first check without an expensive transaction.
 	curVer, err := c.data().GetPeersResourceVersion()
@@ -47,23 +46,17 @@ func (c *Core) AddPeer(peer peer.Peer) error {
 
 // AddPeerToVPN adds a peer to the the VPN.
 func (c *Core) AddPeerToVPN(peer peer.Peer) error {
-	publicIP, _, err := net.SplitHostPort(peer.PublicEndpoint)
-	if err != nil {
-		c.zaplogger.Info("SplitHostPort", zap.Error(err))
-		return err
-	}
-
 	// don't add myself to vpn
 	myIP, err := c.vpn.GetInterfaceIP()
 	if err != nil {
 		return err
 	}
 	if myIP != peer.VPNIP {
-		if err := c.vpn.AddPeer(peer.VPNPubKey, publicIP, peer.VPNIP); err != nil {
-			c.zaplogger.Error("failed to add peer to VPN", zap.Error(err), zap.String("peer public_ip", publicIP), zap.String("peer vpn_ip", peer.VPNIP))
+		if err := c.vpn.AddPeer(peer.VPNPubKey, peer.PublicIP, peer.VPNIP); err != nil {
+			c.zaplogger.Error("failed to add peer to VPN", zap.Error(err), zap.String("peer public_ip", peer.PublicIP), zap.String("peer vpn_ip", peer.VPNIP))
 			return err
 		}
-		c.zaplogger.Info("added peer to VPN", zap.String("peer public_ip", publicIP), zap.String("peer vpn_ip", peer.VPNIP))
+		c.zaplogger.Info("added peer to VPN", zap.String("role", peer.Role.String()), zap.String("coordinator public_ip", peer.PublicIP), zap.String("coordinator vpn_ip", peer.VPNIP))
 	}
 	return nil
 }
@@ -86,7 +79,7 @@ func (c *Core) AddPeerToStore(peer peer.Peer) error {
 	if err := tx.Commit(); err != nil {
 		return err
 	}
-	c.zaplogger.Info("added peer to store", zap.String("peer public_endpoint", peer.PublicEndpoint), zap.String("peer vpn_ip", peer.VPNIP))
+	c.zaplogger.Info("added peer to store", zap.String("peer public_ip", peer.PublicIP), zap.String("peer vpn_ip", peer.VPNIP))
 	return nil
 }
 
@@ -13,8 +13,8 @@ import (
 )
 
 func TestGetPeers(t *testing.T) {
-	peer1 := peer.Peer{PublicEndpoint: "192.0.2.11:2000", VPNIP: "192.0.2.21", VPNPubKey: []byte{1, 2, 3}}
-	peer2 := peer.Peer{PublicEndpoint: "192.0.2.12:2000", VPNIP: "192.0.2.22", VPNPubKey: []byte{2, 3, 4}}
+	peer1 := peer.Peer{PublicIP: "192.0.2.11", VPNIP: "192.0.2.21", VPNPubKey: []byte{1, 2, 3}}
+	peer2 := peer.Peer{PublicIP: "192.0.2.12", VPNIP: "192.0.2.22", VPNPubKey: []byte{2, 3, 4}}
 
 	testCases := map[string]struct {
 		storePeers []peer.Peer
@@ -74,9 +74,9 @@ func TestGetPeers(t *testing.T) {
 func TestAddPeer(t *testing.T) {
 	someErr := errors.New("failed")
 	testPeer := peer.Peer{
-		PublicEndpoint: "192.0.2.11:2000",
-		VPNIP: "192.0.2.21",
-		VPNPubKey: []byte{2, 3, 4},
+		PublicIP: "192.0.2.11",
+		VPNIP: "192.0.2.21",
+		VPNPubKey: []byte{2, 3, 4},
 	}
 	expectedVPNPeers := []stubVPNPeer{{
 		pubKey: testPeer.VPNPubKey,
@@ -101,14 +101,6 @@ func TestAddPeer(t *testing.T) {
 			vpn: stubVPN{interfaceIP: testPeer.VPNIP},
 			expectedStorePeers: []peer.Peer{testPeer},
 		},
-		"public endpoint without port": {
-			peer: peer.Peer{
-				PublicEndpoint: "192.0.2.11",
-				VPNIP: "192.0.2.21",
-				VPNPubKey: []byte{2, 3, 4},
-			},
-			expectErr: true,
-		},
 		"vpn add peer error": {
 			peer: testPeer,
 			vpn: stubVPN{addPeerErr: someErr},
|
@ -3,7 +3,6 @@ package core
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"net"
|
||||
|
||||
"github.com/edgelesssys/constellation/coordinator/peer"
|
||||
)
|
||||
@ -48,7 +47,8 @@ func (v *stubVPN) GetInterfaceIP() (string, error) {
|
||||
return v.interfaceIP, v.getInterfaceIPErr
|
||||
}
|
||||
|
||||
func (*stubVPN) SetInterfaceIP(ip string) error {
|
||||
func (v *stubVPN) SetInterfaceIP(ip string) error {
|
||||
v.interfaceIP = ip
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -70,11 +70,7 @@ func (v *stubVPN) RemovePeer(pubKey []byte) error {
|
||||
|
||||
func (v *stubVPN) UpdatePeers(peers []peer.Peer) error {
|
||||
for _, peer := range peers {
|
||||
peerIP, _, err := net.SplitHostPort(peer.PublicEndpoint)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := v.AddPeer(peer.VPNPubKey, peerIP, peer.VPNIP); err != nil {
|
||||
if err := v.AddPeer(peer.VPNPubKey, peer.PublicIP, peer.VPNIP); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@@ -6,22 +6,16 @@ import (
 	"github.com/edgelesssys/constellation/coordinator/vpnapi/vpnproto"
 )
 
-// AdminData contains all VPN information about the admin.
-type AdminData struct {
-	VPNIP string
-	PublicKey []byte
-}
-
 // Peer holds all information about a peer.
 type Peer struct {
-	// PublicEndpoint is the endpoint on which the peer is reachable.
-	PublicEndpoint string
+	// PublicIP is the public IP address on which the peer is reachable.
+	PublicIP string
 	// VPNIP holds the internal VPN address, can only be used within the VPN
 	// and some gRPC services may only be reachable from this IP.
 	VPNIP string
 	// VPNPubKey contains the PublicKey used for cryptographic purposes in the VPN.
 	VPNPubKey []byte
-	// Role is the peer's role (Coordinator or Node).
+	// Role is the peer's role (Coordinator, Node or Admin).
 	Role role.Role
 }
 
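Because a Peer now carries only the bare public IP, callers derive a dialable address at the call site. A self-contained sketch; the port constant's value is an assumption, the field names come from the struct above:

package main

import (
	"fmt"
	"net"
)

// Peer with only the fields relevant here.
type Peer struct {
	PublicIP string
	VPNIP    string
}

const endpointAVPNPort = "9000" // assumed port value

func main() {
	p := Peer{PublicIP: "192.0.2.11", VPNIP: "10.118.0.11"}
	// Deriving the endpoint on demand keeps the stored state port-agnostic,
	// and JoinHostPort handles IPv6 bracketing for free.
	fmt.Println(net.JoinHostPort(p.PublicIP, endpointAVPNPort)) // 192.0.2.11:9000
}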
@@ -29,10 +23,10 @@ func FromPubProto(peers []*pubproto.Peer) []Peer {
 	var result []Peer
 	for _, p := range peers {
 		result = append(result, Peer{
-			PublicEndpoint: p.PublicEndpoint,
-			VPNIP: p.VpnIp,
-			VPNPubKey: p.VpnPubKey,
-			Role: role.Role(p.Role),
+			PublicIP: p.PublicIp,
+			VPNIP: p.VpnIp,
+			VPNPubKey: p.VpnPubKey,
+			Role: role.Role(p.Role),
 		})
 	}
 	return result
@@ -42,10 +36,10 @@ func ToPubProto(peers []Peer) []*pubproto.Peer {
 	var result []*pubproto.Peer
 	for _, p := range peers {
 		result = append(result, &pubproto.Peer{
-			PublicEndpoint: p.PublicEndpoint,
-			VpnIp: p.VPNIP,
-			VpnPubKey: p.VPNPubKey,
-			Role: uint32(p.Role),
+			PublicIp: p.PublicIP,
+			VpnIp: p.VPNIP,
+			VpnPubKey: p.VPNPubKey,
+			Role: uint32(p.Role),
 		})
 	}
 	return result
@@ -55,10 +49,10 @@ func FromVPNProto(peers []*vpnproto.Peer) []Peer {
 	var result []Peer
 	for _, p := range peers {
 		result = append(result, Peer{
-			PublicEndpoint: p.PublicEndpoint,
-			VPNIP: p.VpnIp,
-			VPNPubKey: p.VpnPubKey,
-			Role: role.Role(p.Role),
+			PublicIP: p.PublicIp,
+			VPNIP: p.VpnIp,
+			VPNPubKey: p.VpnPubKey,
+			Role: role.Role(p.Role),
 		})
 	}
 	return result
@@ -68,10 +62,10 @@ func ToVPNProto(peers []Peer) []*vpnproto.Peer {
 	var result []*vpnproto.Peer
 	for _, p := range peers {
 		result = append(result, &vpnproto.Peer{
-			PublicEndpoint: p.PublicEndpoint,
-			VpnIp: p.VPNIP,
-			VpnPubKey: p.VPNPubKey,
-			Role: uint32(p.Role),
+			PublicIp: p.PublicIP,
+			VpnIp: p.VPNIP,
+			VpnPubKey: p.VPNPubKey,
+			Role: uint32(p.Role),
 		})
 	}
 	return result
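The four converters are symmetric pairs, so a peer should survive a round trip through either wire format. A hedged sketch of how they compose (type and package names from the code shown above):

	peers := []Peer{{PublicIP: "192.0.2.11", VPNIP: "10.118.0.11", VPNPubKey: []byte{1, 2, 3}, Role: role.Node}}
	wire := ToPubProto(peers)  // []*pubproto.Peer, ready for the gRPC message
	back := FromPubProto(wire) // []Peer again; expected to match field-for-field
	_ = back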
|
@ -70,8 +70,11 @@ func (a *API) ActivateAsCoordinator(in *pubproto.ActivateAsCoordinatorRequest, s
|
||||
if err := a.core.SetUpKMS(ctx, in.StorageUri, in.KmsUri, in.KeyEncryptionKeyId, in.UseExistingKek); err != nil {
|
||||
return status.Errorf(codes.Internal, "%v", err)
|
||||
}
|
||||
|
||||
coordPeer, err := a.makeCoordinatorPeer()
|
||||
vpnIP, err := a.core.GetNextCoordinatorIP()
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Internal, "could not obtain coordinator vpn ip%v", err)
|
||||
}
|
||||
coordPeer, err := a.assemblePeerStruct(vpnIP, role.Coordinator)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Internal, "%v", err)
|
||||
}
|
||||
@ -100,9 +103,8 @@ func (a *API) ActivateAsCoordinator(in *pubproto.ActivateAsCoordinatorRequest, s
|
||||
panic(err)
|
||||
}
|
||||
}()
|
||||
|
||||
// TODO: check performance and maybe make concurrent
|
||||
if err := a.activateNodes(logToCLI, in.NodePublicEndpoints, coordPeer); err != nil {
|
||||
if err := a.activateNodes(logToCLI, in.NodePublicIps); err != nil {
|
||||
a.logger.Error("node activation failed", zap.Error(err))
|
||||
return status.Errorf(codes.Internal, "%v", err)
|
||||
}
|
||||
@ -115,9 +117,16 @@ func (a *API) ActivateAsCoordinator(in *pubproto.ActivateAsCoordinatorRequest, s
|
||||
if err := a.core.PersistNodeState(role.Coordinator, ownerID, clusterID); err != nil {
|
||||
return status.Errorf(codes.Internal, "%v", err)
|
||||
}
|
||||
|
||||
adminVPNIP, err := a.core.GetNextNodeIP()
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Internal, "%v", err)
|
||||
}
|
||||
// This effectively gives code execution, so we do this last.
|
||||
adminVPNIP, err := a.core.AddAdmin(in.AdminVpnPubKey)
|
||||
err = a.core.AddPeer(peer.Peer{
|
||||
VPNIP: adminVPNIP,
|
||||
VPNPubKey: in.AdminVpnPubKey,
|
||||
Role: role.Admin,
|
||||
})
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Internal, "%v", err)
|
||||
}
|
||||
@ -141,11 +150,6 @@ func (a *API) ActivateAdditionalNodes(in *pubproto.ActivateAdditionalNodesReques
|
||||
return status.Errorf(codes.FailedPrecondition, "%v", err)
|
||||
}
|
||||
|
||||
coordPeer, err := a.makeCoordinatorPeer()
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Internal, "%v", err)
|
||||
}
|
||||
|
||||
logToCLI := a.newLogToCLIFunc(func(msg string) error {
|
||||
return srv.Send(&pubproto.ActivateAdditionalNodesResponse{
|
||||
Log: &pubproto.Log{
|
||||
@ -155,7 +159,7 @@ func (a *API) ActivateAdditionalNodes(in *pubproto.ActivateAdditionalNodesReques
|
||||
})
|
||||
|
||||
// TODO: check performance and maybe make concurrent
|
||||
if err := a.activateNodes(logToCLI, in.NodePublicEndpoints, coordPeer); err != nil {
|
||||
if err := a.activateNodes(logToCLI, in.NodePublicIps); err != nil {
|
||||
a.logger.Error("node activation failed", zap.Error(err))
|
||||
return status.Errorf(codes.Internal, "%v", err)
|
||||
}
|
||||
@ -182,9 +186,13 @@ func (a *API) RequestStateDiskKey(ctx context.Context, in *pubproto.RequestState
|
||||
return &pubproto.RequestStateDiskKeyResponse{}, errors.New("unimplemented")
|
||||
}
|
||||
|
||||
func (a *API) activateNodes(logToCLI logFunc, nodePublicEndpoints []string, coordPeer peer.Peer) error {
|
||||
// Create initial peer data to be sent to the nodes. Currently, this is just this Coordinator.
|
||||
initialPeers := peer.ToPubProto([]peer.Peer{coordPeer})
|
||||
func (a *API) activateNodes(logToCLI logFunc, nodePublicIPs []string) error {
|
||||
_, peers, err := a.core.GetPeers(0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// we need to add at least all coordinators to the peer for HA
|
||||
initialPeers := peer.ToPubProto(peers)
|
||||
|
||||
ownerID, clusterID, err := a.core.GetIDs(nil)
|
||||
if err != nil {
|
||||
@@ -192,42 +200,60 @@ func (a *API) activateNodes(logToCLI logFunc, nodePublicEndpoints []string, coor
 	}
 
 	// Activate all nodes.
-	for num, nodePublicEndpoint := range nodePublicEndpoints {
-		logToCLI("Activating node %3d out of %3d ...", num+1, len(nodePublicEndpoints))
+	for num, nodePublicIP := range nodePublicIPs {
+		logToCLI("activating node %3d out of %3d nodes ...", num+1, len(nodePublicIPs))
 		nodeVPNIP, err := a.core.GetNextNodeIP()
 		if err != nil {
 			a.logger.Error("generation of vpn ips failed", zap.Error(err))
 			return err
 		}
-		nodeVpnPubKey, err := a.activateNode(nodePublicEndpoint, nodeVPNIP, initialPeers, ownerID, clusterID)
+		nodeVpnPubKey, err := a.activateNode(nodePublicIP, nodeVPNIP, initialPeers, ownerID, clusterID)
 		if err != nil {
 			return err
 		}
 		peer := peer.Peer{
-			PublicEndpoint: nodePublicEndpoint,
-			VPNIP: nodeVPNIP,
-			VPNPubKey: nodeVpnPubKey,
-			Role: role.Node,
+			PublicIP: nodePublicIP,
+			VPNIP: nodeVPNIP,
+			VPNPubKey: nodeVpnPubKey,
+			Role: role.Node,
 		}
 		if err := a.core.AddPeer(peer); err != nil {
 			return err
 		}
-		if err := a.joinCluster(nodePublicEndpoint); err != nil {
+		// This can be omitted if we
+		// 1. Use a gRPC HA balancer mechanism, which picks one active coordinator connection
+		// (nodeUpdate loop causes problems, even if we specify the IP in the joinCluster RPC)
+		if err := a.updateCoordinator(); err != nil {
+			return err
+		}
+		if err := a.joinCluster(nodePublicIP); err != nil {
 			return err
 		}
 	}
 
-	// Manually trigger an update operation on all nodes.
+	// Manually trigger an update operation on all peers.
 	// This may be expendable in the future, depending on whether it's acceptable that it takes
 	// some seconds until the nodes get all peer data via their regular update requests.
-	_, peers, err := a.core.GetPeers(0)
+	_, peers, err = a.core.GetPeers(0)
 	if err != nil {
 		return err
 	}
+	vpnIP, err := a.core.GetVPNIP()
+	if err != nil {
+		return err
+	}
 	for _, p := range peers {
 		if p.Role == role.Node {
-			if err := a.triggerNodeUpdate(p.PublicEndpoint); err != nil {
-				a.logger.DPanic("TriggerNodeUpdate failed", zap.Error(err))
+			if err := a.triggerNodeUpdate(p.PublicIP); err != nil {
+				a.logger.Error("TriggerNodeUpdate failed", zap.Error(err))
 			}
 		}
+
+		if p.Role == role.Coordinator && p.VPNIP != vpnIP {
+			a.logger.Info("update coordinator", zap.String("coordinator vpnIP", p.VPNIP))
+			if err := a.triggerCoordinatorUpdate(context.TODO(), p.PublicIP); err != nil {
+				// no reason to panic here, we can recover
+				a.logger.Error("triggerCoordinatorUpdate failed", zap.Error(err), zap.String("endpoint", p.PublicIP), zap.String("vpnip", p.VPNIP))
+			}
+		}
 	}
@@ -235,11 +261,11 @@
 	return nil
 }
 
-func (a *API) activateNode(nodePublicEndpoint string, nodeVPNIP string, initialPeers []*pubproto.Peer, ownerID, clusterID []byte) ([]byte, error) {
+func (a *API) activateNode(nodePublicIP string, nodeVPNIP string, initialPeers []*pubproto.Peer, ownerID, clusterID []byte) ([]byte, error) {
 	ctx, cancel := context.WithTimeout(context.Background(), deadlineDuration)
 	defer cancel()
 
-	conn, err := a.dial(ctx, nodePublicEndpoint)
+	conn, err := a.dial(ctx, net.JoinHostPort(nodePublicIP, endpointAVPNPort))
 	if err != nil {
 		return nil, err
 	}
|
||||
return resp.NodeVpnPubKey, nil
|
||||
}
|
||||
|
||||
func (a *API) makeCoordinatorPeer() (peer.Peer, error) {
|
||||
coordinatorVPNPubKey, err := a.core.GetVPNPubKey()
|
||||
// assemblePeerStruct combines all information of this peer into a peer struct.
|
||||
func (a *API) assemblePeerStruct(vpnIP string, _ role.Role) (peer.Peer, error) {
|
||||
vpnPubKey, err := a.core.GetVPNPubKey()
|
||||
if err != nil {
|
||||
a.logger.Error("could not get key", zap.Error(err))
|
||||
return peer.Peer{}, err
|
||||
}
|
||||
coordinatorPublicIP, err := a.getPublicIPAddr()
|
||||
publicIP, err := a.getPublicIPAddr()
|
||||
if err != nil {
|
||||
a.logger.Error("could not get public IP", zap.Error(err))
|
||||
return peer.Peer{}, err
|
||||
}
|
||||
return peer.Peer{
|
||||
PublicEndpoint: net.JoinHostPort(coordinatorPublicIP, endpointAVPNPort),
|
||||
VPNIP: a.core.GetCoordinatorVPNIP(),
|
||||
VPNPubKey: coordinatorVPNPubKey,
|
||||
Role: role.Coordinator,
|
||||
PublicIP: publicIP,
|
||||
VPNIP: vpnIP,
|
||||
VPNPubKey: vpnPubKey,
|
||||
Role: role.Coordinator,
|
||||
}, err
|
||||
}
|
||||
|
||||
@ -288,31 +315,55 @@ func (a *API) newLogToCLIFunc(send func(string) error) logFunc {
|
||||
}
|
||||
}
|
||||
|
||||
func (a *API) joinCluster(nodePublicEndpoint string) error {
|
||||
func (a *API) joinCluster(nodePublicIP string) error {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), deadlineDuration)
|
||||
defer cancel()
|
||||
|
||||
vpnIP, err := a.core.GetVPNIP()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// We don't verify the peer certificate here, since JoinCluster triggers a connection over VPN
|
||||
// The target of the rpc needs to already be part of the VPN to process the request, meaning it is trusted
|
||||
conn, err := a.dialNoVerify(ctx, nodePublicEndpoint)
|
||||
conn, err := a.dialNoVerify(ctx, net.JoinHostPort(nodePublicIP, endpointAVPNPort))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
client := pubproto.NewAPIClient(conn)
|
||||
_, err = client.JoinCluster(ctx, &pubproto.JoinClusterRequest{})
|
||||
_, err = client.JoinCluster(ctx, &pubproto.JoinClusterRequest{CoordinatorVpnIp: vpnIP})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (a *API) triggerNodeUpdate(nodePublicEndpoint string) error {
|
||||
func (a *API) updateCoordinator() error {
|
||||
_, peers, err := a.core.GetPeers(0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
vpnIP, err := a.core.GetVPNIP()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, p := range peers {
|
||||
if p.Role == role.Coordinator && p.VPNIP != vpnIP {
|
||||
a.logger.Info("update coordinator", zap.String("coordinator vpnIP", p.VPNIP))
|
||||
if err := a.triggerCoordinatorUpdate(context.TODO(), p.PublicIP); err != nil {
|
||||
a.logger.Error("triggerCoordinatorUpdate failed", zap.Error(err), zap.String("endpoint", p.PublicIP), zap.String("vpnip", p.VPNIP))
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *API) triggerNodeUpdate(nodePublicIP string) error {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), deadlineDuration)
|
||||
defer cancel()
|
||||
|
||||
// We don't verify the peer certificate here, since TriggerNodeUpdate triggers a connection over VPN
|
||||
// The target of the rpc needs to already be part of the VPN to process the request, meaning it is trusted
|
||||
conn, err := a.dialNoVerify(ctx, nodePublicEndpoint)
|
||||
conn, err := a.dialNoVerify(ctx, net.JoinHostPort(nodePublicIP, endpointAVPNPort))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
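The in-code comment above hints that updateCoordinator may eventually give way to client-side load balancing across coordinators. One possible direction using standard grpc-go service config; the target name and credentials here are illustrative assumptions, not project code:

	// Round-robin over all addresses the resolver returns for the target.
	conn, err := grpc.Dial(
		"dns:///coordinators.example.internal:9000", // assumed DNS name listing all coordinators
		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin":{}}]}`),
		grpc.WithTransportCredentials(insecure.NewCredentials()), // placeholder; the project would use aTLS
	)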
|
@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"errors"
|
||||
"net"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/edgelesssys/constellation/coordinator/atls"
|
||||
@ -24,71 +25,77 @@ import (
|
||||
func TestActivateAsCoordinator(t *testing.T) {
|
||||
someErr := errors.New("failed")
|
||||
coordinatorPubKey := []byte{6, 7, 8}
|
||||
testNode1 := &stubNode{publicIP: "192.0.2.11", pubKey: []byte{1, 2, 3}}
|
||||
testNode2 := &stubNode{publicIP: "192.0.2.12", pubKey: []byte{2, 3, 4}}
|
||||
testNode3 := &stubNode{publicIP: "192.0.2.13", pubKey: []byte{3, 4, 5}}
|
||||
expectedNode1 := peer.Peer{PublicEndpoint: "192.0.2.11:9000", VPNIP: "192.0.2.101", VPNPubKey: []byte{1, 2, 3}, Role: role.Node}
|
||||
expectedNode2 := peer.Peer{PublicEndpoint: "192.0.2.12:9000", VPNIP: "192.0.2.102", VPNPubKey: []byte{2, 3, 4}, Role: role.Node}
|
||||
expectedNode3 := peer.Peer{PublicEndpoint: "192.0.2.13:9000", VPNIP: "192.0.2.103", VPNPubKey: []byte{3, 4, 5}, Role: role.Node}
|
||||
expectedCoord := peer.Peer{PublicEndpoint: "192.0.2.1:9000", VPNIP: "192.0.2.100", VPNPubKey: coordinatorPubKey, Role: role.Coordinator}
|
||||
testNode1 := &stubPeer{peer: peer.Peer{PublicIP: "192.0.2.11", VPNPubKey: []byte{1, 2, 3}}}
|
||||
testNode2 := &stubPeer{peer: peer.Peer{PublicIP: "192.0.2.12", VPNPubKey: []byte{2, 3, 4}}}
|
||||
testNode3 := &stubPeer{peer: peer.Peer{PublicIP: "192.0.2.13", VPNPubKey: []byte{3, 4, 5}}}
|
||||
expectedNode1 := peer.Peer{PublicIP: "192.0.2.11", VPNIP: "10.118.0.11", VPNPubKey: []byte{1, 2, 3}, Role: role.Node}
|
||||
expectedNode2 := peer.Peer{PublicIP: "192.0.2.12", VPNIP: "10.118.0.12", VPNPubKey: []byte{2, 3, 4}, Role: role.Node}
|
||||
expectedNode3 := peer.Peer{PublicIP: "192.0.2.13", VPNIP: "10.118.0.13", VPNPubKey: []byte{3, 4, 5}, Role: role.Node}
|
||||
expectedCoord := peer.Peer{PublicIP: "192.0.2.1", VPNIP: "10.118.0.1", VPNPubKey: coordinatorPubKey, Role: role.Coordinator}
|
||||
adminPeer := peer.Peer{VPNPubKey: []byte{7, 8, 9}, Role: role.Admin}
|
||||
|
||||
testCases := map[string]struct {
|
||||
nodes []*stubNode
|
||||
nodes []*stubPeer
|
||||
state state.State
|
||||
switchToPersistentStoreErr error
|
||||
expectErr bool
|
||||
expectedPeers []peer.Peer
|
||||
expectedState state.State
|
||||
adminVPNIP string
|
||||
}{
|
||||
"0 nodes": {
|
||||
state: state.AcceptingInit,
|
||||
expectedPeers: []peer.Peer{expectedCoord},
|
||||
expectedState: state.ActivatingNodes,
|
||||
adminVPNIP: "10.118.0.11",
|
||||
},
|
||||
"1 node": {
|
||||
nodes: []*stubNode{testNode1},
|
||||
nodes: []*stubPeer{testNode1},
|
||||
state: state.AcceptingInit,
|
||||
expectedPeers: []peer.Peer{expectedCoord, expectedNode1},
|
||||
expectedState: state.ActivatingNodes,
|
||||
adminVPNIP: "10.118.0.12",
|
||||
},
|
||||
"2 nodes": {
|
||||
nodes: []*stubNode{testNode1, testNode2},
|
||||
nodes: []*stubPeer{testNode1, testNode2},
|
||||
state: state.AcceptingInit,
|
||||
expectedPeers: []peer.Peer{expectedCoord, expectedNode1, expectedNode2},
|
||||
expectedState: state.ActivatingNodes,
|
||||
adminVPNIP: "10.118.0.13",
|
||||
},
|
||||
"3 nodes": {
|
||||
nodes: []*stubNode{testNode1, testNode2, testNode3},
|
||||
nodes: []*stubPeer{testNode1, testNode2, testNode3},
|
||||
state: state.AcceptingInit,
|
||||
expectedPeers: []peer.Peer{expectedCoord, expectedNode1, expectedNode2, expectedNode3},
|
||||
expectedState: state.ActivatingNodes,
|
||||
adminVPNIP: "10.118.0.14",
|
||||
},
|
||||
"already activated": {
|
||||
nodes: []*stubNode{testNode1},
|
||||
nodes: []*stubPeer{testNode1},
|
||||
state: state.ActivatingNodes,
|
||||
expectErr: true,
|
||||
expectedState: state.ActivatingNodes,
|
||||
},
|
||||
"wrong peer kind": {
|
||||
nodes: []*stubNode{testNode1},
|
||||
nodes: []*stubPeer{testNode1},
|
||||
state: state.IsNode,
|
||||
expectErr: true,
|
||||
expectedState: state.IsNode,
|
||||
},
|
||||
"node activation error": {
|
||||
nodes: []*stubNode{testNode1, {activateErr: someErr}, testNode3},
|
||||
nodes: []*stubPeer{testNode1, {activateErr: someErr}, testNode3},
|
||||
state: state.AcceptingInit,
|
||||
expectErr: true,
|
||||
expectedState: state.Failed,
|
||||
},
|
||||
"node join error": {
|
||||
nodes: []*stubNode{testNode1, {joinErr: someErr}, testNode3},
|
||||
nodes: []*stubPeer{testNode1, {joinErr: someErr}, testNode3},
|
||||
state: state.AcceptingInit,
|
||||
expectErr: true,
|
||||
expectedState: state.Failed,
|
||||
},
|
||||
"SwitchToPersistentStore error": {
|
||||
nodes: []*stubNode{testNode1},
|
||||
nodes: []*stubPeer{testNode1},
|
||||
state: state.AcceptingInit,
|
||||
switchToPersistentStoreErr: someErr,
|
||||
expectErr: true,
|
||||
@@ -101,7 +108,6 @@ func TestActivateAsCoordinator(t *testing.T) {
 			assert := assert.New(t)
 			require := require.New(t)
 
-			adminPubKey := []byte{7, 8, 9}
 			autoscalingNodeGroups := []string{"ang1", "ang2"}
 			keyEncryptionKeyID := "constellation"
 
@@ -120,21 +126,28 @@ func TestActivateAsCoordinator(t *testing.T) {
 			}
 
 			api := New(zaptest.NewLogger(t), core, dialer, stubVPNAPIServer{}, fakeValidator{}, getPublicIPAddr)
 			defer api.Close()
 
 			// spawn nodes
-			var nodePublicEndpoints []string
+			var nodePublicIPs []string
+			var wg sync.WaitGroup
 			for _, n := range tc.nodes {
-				publicEndpoint := net.JoinHostPort(n.publicIP, endpointAVPNPort)
-				nodePublicEndpoints = append(nodePublicEndpoints, publicEndpoint)
+				nodePublicIPs = append(nodePublicIPs, n.peer.PublicIP)
 				server := n.newServer()
-				go server.Serve(dialer.GetListener(publicEndpoint))
+				wg.Add(1)
+				go func(endpoint string) {
+					listener := dialer.GetListener(endpoint)
+					wg.Done()
+					_ = server.Serve(listener)
+				}(net.JoinHostPort(n.peer.PublicIP, endpointAVPNPort))
 				defer server.GracefulStop()
 			}
+			wg.Wait()
 
 			stream := &stubActivateAsCoordinatorServer{}
 			err := api.ActivateAsCoordinator(&pubproto.ActivateAsCoordinatorRequest{
-				AdminVpnPubKey: adminPubKey,
-				NodePublicEndpoints: nodePublicEndpoints,
+				AdminVpnPubKey: adminPeer.VPNPubKey,
+				NodePublicIps: nodePublicIPs,
 				AutoscalingNodeGroups: autoscalingNodeGroups,
 				MasterSecret: []byte("Constellation"),
 				KeyEncryptionKeyId: keyEncryptionKeyID,
@@ -157,16 +170,19 @@ func TestActivateAsCoordinator(t *testing.T) {
 				assert.NotEmpty(stream.sent[i].GetLog().Message)
 			}
 			adminConfig := stream.sent[len(stream.sent)-1].GetAdminConfig()
-			assert.Equal("192.0.2.99", adminConfig.AdminVpnIp)
+			assert.Equal(tc.adminVPNIP, adminConfig.AdminVpnIp)
 			assert.Equal(coordinatorPubKey, adminConfig.CoordinatorVpnPubKey)
 			assert.Equal(core.kubeconfig, adminConfig.Kubeconfig)
 			assert.Equal(core.ownerID, adminConfig.OwnerId)
 			assert.Equal(core.clusterID, adminConfig.ClusterId)
 
 			// Core is updated
-			assert.Equal(adminPubKey, core.adminPubKey)
-			assert.Equal(core.GetCoordinatorVPNIP(), core.vpnIP)
-			assert.Equal(tc.expectedPeers, core.peers)
+			vpnIP, err := core.GetVPNIP()
+			require.NoError(err)
+			assert.Equal(vpnIP, core.vpnIP)
+			// construct full list of expected peers
+			adminPeer.VPNIP = tc.adminVPNIP
+			assert.Equal(append(tc.expectedPeers, adminPeer), core.peers)
 			assert.Equal(autoscalingNodeGroups, core.autoscalingNodeGroups)
 			assert.Equal(keyEncryptionKeyID, core.kekID)
 			assert.Equal([]role.Role{role.Coordinator}, core.persistNodeStateRoles)
@@ -176,15 +192,15 @@
 
 func TestActivateAdditionalNodes(t *testing.T) {
 	someErr := errors.New("failed")
-	testNode1 := &stubNode{publicIP: "192.0.2.11", pubKey: []byte{1, 2, 3}}
-	testNode2 := &stubNode{publicIP: "192.0.2.12", pubKey: []byte{2, 3, 4}}
-	testNode3 := &stubNode{publicIP: "192.0.2.13", pubKey: []byte{3, 4, 5}}
-	expectedNode1 := peer.Peer{PublicEndpoint: "192.0.2.11:9000", VPNIP: "192.0.2.101", VPNPubKey: []byte{1, 2, 3}, Role: role.Node}
-	expectedNode2 := peer.Peer{PublicEndpoint: "192.0.2.12:9000", VPNIP: "192.0.2.102", VPNPubKey: []byte{2, 3, 4}, Role: role.Node}
-	expectedNode3 := peer.Peer{PublicEndpoint: "192.0.2.13:9000", VPNIP: "192.0.2.103", VPNPubKey: []byte{3, 4, 5}, Role: role.Node}
+	testNode1 := &stubPeer{peer: peer.Peer{PublicIP: "192.0.2.11", VPNPubKey: []byte{1, 2, 3}}}
+	testNode2 := &stubPeer{peer: peer.Peer{PublicIP: "192.0.2.12", VPNPubKey: []byte{2, 3, 4}}}
+	testNode3 := &stubPeer{peer: peer.Peer{PublicIP: "192.0.2.13", VPNPubKey: []byte{3, 4, 5}}}
+	expectedNode1 := peer.Peer{PublicIP: "192.0.2.11", VPNIP: "10.118.0.11", VPNPubKey: []byte{1, 2, 3}, Role: role.Node}
+	expectedNode2 := peer.Peer{PublicIP: "192.0.2.12", VPNIP: "10.118.0.12", VPNPubKey: []byte{2, 3, 4}, Role: role.Node}
+	expectedNode3 := peer.Peer{PublicIP: "192.0.2.13", VPNIP: "10.118.0.13", VPNPubKey: []byte{3, 4, 5}, Role: role.Node}
 
 	testCases := map[string]struct {
-		nodes []*stubNode
+		nodes []*stubPeer
 		state state.State
 		expectErr bool
 		expectedPeers []peer.Peer
@@ -193,36 +209,36 @@ func TestActivateAdditionalNodes(t *testing.T) {
 			state: state.ActivatingNodes,
 		},
 		"1 node": {
-			nodes: []*stubNode{testNode1},
+			nodes: []*stubPeer{testNode1},
 			state: state.ActivatingNodes,
 			expectedPeers: []peer.Peer{expectedNode1},
 		},
 		"2 nodes": {
-			nodes: []*stubNode{testNode1, testNode2},
+			nodes: []*stubPeer{testNode1, testNode2},
 			state: state.ActivatingNodes,
 			expectedPeers: []peer.Peer{expectedNode1, expectedNode2},
 		},
 		"3 nodes": {
-			nodes: []*stubNode{testNode1, testNode2, testNode3},
+			nodes: []*stubPeer{testNode1, testNode2, testNode3},
 			state: state.ActivatingNodes,
 			expectedPeers: []peer.Peer{expectedNode1, expectedNode2, expectedNode3},
 		},
 		"uninitialized": {
-			nodes: []*stubNode{testNode1},
+			nodes: []*stubPeer{testNode1},
 			expectErr: true,
 		},
 		"wrong peer kind": {
-			nodes: []*stubNode{testNode1},
+			nodes: []*stubPeer{testNode1},
 			state: state.IsNode,
 			expectErr: true,
 		},
 		"node activation error": {
-			nodes: []*stubNode{testNode1, {activateErr: someErr}, testNode3},
+			nodes: []*stubPeer{testNode1, {activateErr: someErr}, testNode3},
 			state: state.ActivatingNodes,
 			expectErr: true,
 		},
 		"node join error": {
-			nodes: []*stubNode{testNode1, {joinErr: someErr}, testNode3},
+			nodes: []*stubPeer{testNode1, {joinErr: someErr}, testNode3},
 			state: state.ActivatingNodes,
 			expectErr: true,
 		},
@@ -241,19 +257,26 @@
 			}
 
 			api := New(zaptest.NewLogger(t), core, dialer, nil, fakeValidator{}, getPublicIPAddr)
 
 			defer api.Close()
 			// spawn nodes
-			var nodePublicEndpoints []string
+			var nodePublicIPs []string
+			var wg sync.WaitGroup
 			for _, n := range tc.nodes {
-				publicEndpoint := net.JoinHostPort(n.publicIP, endpointAVPNPort)
-				nodePublicEndpoints = append(nodePublicEndpoints, publicEndpoint)
+				nodePublicIPs = append(nodePublicIPs, n.peer.PublicIP)
 				server := n.newServer()
-				go server.Serve(dialer.GetListener(publicEndpoint))
+				wg.Add(1)
+				go func(endpoint string) {
+					listener := dialer.GetListener(endpoint)
+					wg.Done()
+					_ = server.Serve(listener)
+				}(net.JoinHostPort(n.peer.PublicIP, endpointAVPNPort))
 				defer server.GracefulStop()
 			}
 
+			wg.Wait()
+			// since we are not activating the coordinator, initialize the store with IP's
+			require.NoError(core.InitializeStoreIPs())
 			stream := &stubActivateAdditionalNodesServer{}
-			err := api.ActivateAdditionalNodes(&pubproto.ActivateAdditionalNodesRequest{NodePublicEndpoints: nodePublicEndpoints}, stream)
+			err := api.ActivateAdditionalNodes(&pubproto.ActivateAdditionalNodesRequest{NodePublicIps: nodePublicIPs}, stream)
 			if tc.expectErr {
 				assert.Error(err)
 				return
@@ -272,7 +295,7 @@ func TestActivateAdditionalNodes(t *testing.T) {
 		}
 	}
 
-func TestMakeCoordinatorPeer(t *testing.T) {
+func TestAssemblePeerStruct(t *testing.T) {
 	assert := assert.New(t)
 	require := require.New(t)
 
@@ -283,40 +306,46 @@ func TestMakeCoordinatorPeer(t *testing.T) {
 	vpnPubKey := []byte{2, 3, 4}
 	core := &fakeCore{vpnPubKey: vpnPubKey}
 	api := New(zaptest.NewLogger(t), core, nil, nil, nil, getPublicIPAddr)
 	defer api.Close()
 
+	vpnIP, err := core.GetVPNIP()
+	require.NoError(err)
 	expected := peer.Peer{
-		PublicEndpoint: "192.0.2.1:9000",
-		VPNIP: core.GetCoordinatorVPNIP(),
-		VPNPubKey: vpnPubKey,
-		Role: role.Coordinator,
+		PublicIP: "192.0.2.1",
+		VPNIP: vpnIP,
+		VPNPubKey: vpnPubKey,
+		Role: role.Coordinator,
 	}
 
-	actual, err := api.makeCoordinatorPeer()
+	actual, err := api.assemblePeerStruct(vpnIP, role.Coordinator)
 	require.NoError(err)
 	assert.Equal(expected, actual)
 }
 
-type stubNode struct {
-	publicIP string
-	pubKey []byte
+type stubPeer struct {
+	peer peer.Peer
 	activateErr error
 	joinErr error
 	pubproto.UnimplementedAPIServer
 }
 
-func (n *stubNode) ActivateAsNode(ctx context.Context, in *pubproto.ActivateAsNodeRequest) (*pubproto.ActivateAsNodeResponse, error) {
-	return &pubproto.ActivateAsNodeResponse{NodeVpnPubKey: n.pubKey}, n.activateErr
+func (n *stubPeer) ActivateAsNode(ctx context.Context, in *pubproto.ActivateAsNodeRequest) (*pubproto.ActivateAsNodeResponse, error) {
+	return &pubproto.ActivateAsNodeResponse{NodeVpnPubKey: n.peer.VPNPubKey}, n.activateErr
 }
 
-func (*stubNode) TriggerNodeUpdate(ctx context.Context, in *pubproto.TriggerNodeUpdateRequest) (*pubproto.TriggerNodeUpdateResponse, error) {
+func (n *stubPeer) ActivateAsAdditionalCoordinator(ctx context.Context, in *pubproto.ActivateAsAdditionalCoordinatorRequest) (*pubproto.ActivateAsAdditionalCoordinatorResponse, error) {
+	return &pubproto.ActivateAsAdditionalCoordinatorResponse{}, n.activateErr
+}
+
+func (*stubPeer) TriggerNodeUpdate(ctx context.Context, in *pubproto.TriggerNodeUpdateRequest) (*pubproto.TriggerNodeUpdateResponse, error) {
 	return &pubproto.TriggerNodeUpdateResponse{}, nil
 }
 
-func (n *stubNode) JoinCluster(ctx context.Context, in *pubproto.JoinClusterRequest) (*pubproto.JoinClusterResponse, error) {
+func (n *stubPeer) JoinCluster(ctx context.Context, in *pubproto.JoinClusterRequest) (*pubproto.JoinClusterResponse, error) {
 	return &pubproto.JoinClusterResponse{}, n.joinErr
 }
 
-func (n *stubNode) newServer() *grpc.Server {
+func (n *stubPeer) newServer() *grpc.Server {
 	tlsConfig, err := atls.CreateAttestationServerTLSConfig(fakeIssuer{})
 	if err != nil {
 		panic(err)
|
@ -12,10 +12,10 @@ import (
|
||||
type Core interface {
|
||||
GetVPNPubKey() ([]byte, error)
|
||||
SetVPNIP(string) error
|
||||
GetCoordinatorVPNIP() string
|
||||
GetVPNIP() (string, error)
|
||||
InitializeStoreIPs() error
|
||||
AddAdmin(pubKey []byte) (string, error)
|
||||
GetNextNodeIP() (string, error)
|
||||
GetNextCoordinatorIP() (string, error)
|
||||
SwitchToPersistentStore() error
|
||||
GetIDs(masterSecret []byte) (ownerID []byte, clusterID []byte, err error)
|
||||
PersistNodeState(role role.Role, ownerID []byte, clusterID []byte) error
|
||||
@ -28,6 +28,8 @@ type Core interface {
|
||||
|
||||
GetPeers(resourceVersion int) (int, []peer.Peer, error)
|
||||
AddPeer(peer.Peer) error
|
||||
AddPeerToStore(peer.Peer) error
|
||||
AddPeerToVPN(peer.Peer) error
|
||||
UpdatePeers([]peer.Peer) error
|
||||
|
||||
InitCluster(autoscalingNodeGroups []string, cloudServiceAccountURI string) ([]byte, error)
|
||||
|
@ -3,7 +3,7 @@ package pubapi
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/netip"
|
||||
|
||||
"github.com/edgelesssys/constellation/coordinator/peer"
|
||||
"github.com/edgelesssys/constellation/coordinator/role"
|
||||
@ -15,8 +15,8 @@ type fakeCore struct {
|
||||
vpnPubKey []byte
|
||||
vpnIP string
|
||||
setVPNIPErr error
|
||||
adminPubKey []byte
|
||||
nextIP int
|
||||
nextNodeIP netip.Addr
|
||||
nextCoordinatorIP netip.Addr
|
||||
switchToPersistentStoreErr error
|
||||
state state.State
|
||||
ownerID []byte
|
||||
@ -27,6 +27,8 @@ type fakeCore struct {
|
||||
autoscalingNodeGroups []string
|
||||
joinArgs []kubeadm.BootstrapTokenDiscovery
|
||||
joinClusterErr error
|
||||
UpdatePeersErr error
|
||||
GetPeersErr error
|
||||
persistNodeStateRoles []role.Role
|
||||
persistNodeStateErr error
|
||||
kekID string
|
||||
@ -47,21 +49,25 @@ func (c *fakeCore) SetVPNIP(ip string) error {
|
||||
}
|
||||
|
||||
func (c *fakeCore) InitializeStoreIPs() error {
|
||||
c.nextCoordinatorIP = netip.AddrFrom4([4]byte{10, 118, 0, 1})
|
||||
c.nextNodeIP = netip.AddrFrom4([4]byte{10, 118, 0, 11})
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*fakeCore) GetCoordinatorVPNIP() string {
|
||||
return "192.0.2.100"
|
||||
}
|
||||
|
||||
func (c *fakeCore) AddAdmin(pubKey []byte) (string, error) {
|
||||
c.adminPubKey = pubKey
|
||||
return "192.0.2.99", nil
|
||||
func (c *fakeCore) GetVPNIP() (string, error) {
|
||||
return c.vpnIP, nil
|
||||
}
|
||||
|
||||
func (c *fakeCore) GetNextNodeIP() (string, error) {
|
||||
c.nextIP++
|
||||
return fmt.Sprintf("192.0.2.%v", 100+c.nextIP), nil
|
||||
ip := c.nextNodeIP.String()
|
||||
c.nextNodeIP = c.nextNodeIP.Next()
|
||||
return ip, nil
|
||||
}
|
||||
|
||||
func (c *fakeCore) GetNextCoordinatorIP() (string, error) {
|
||||
ip := c.nextCoordinatorIP.String()
|
||||
c.nextCoordinatorIP = c.nextCoordinatorIP.Next()
|
||||
return ip, nil
|
||||
}
|
||||
|
||||
func (c *fakeCore) SwitchToPersistentStore() error {
|
||||
@ -87,8 +93,8 @@ func (c *fakeCore) AdvanceState(newState state.State, ownerID, clusterID []byte)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*fakeCore) GetPeers(resourceVersion int) (int, []peer.Peer, error) {
|
||||
return 0, nil, nil
|
||||
func (c *fakeCore) GetPeers(resourceVersion int) (int, []peer.Peer, error) {
|
||||
return 1, c.peers, c.GetPeersErr
|
||||
}
|
||||
|
||||
func (c *fakeCore) AddPeer(peer peer.Peer) error {
|
||||
@ -96,9 +102,19 @@ func (c *fakeCore) AddPeer(peer peer.Peer) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *fakeCore) AddPeerToStore(peer peer.Peer) error {
|
||||
c.peers = append(c.peers, peer)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *fakeCore) AddPeerToVPN(peer peer.Peer) error {
|
||||
c.peers = append(c.peers, peer)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *fakeCore) UpdatePeers(peers []peer.Peer) error {
|
||||
c.updatedPeers = append(c.updatedPeers, peers)
|
||||
return nil
|
||||
return c.UpdatePeersErr
|
||||
}
|
||||
|
||||
func (c *fakeCore) InitCluster(autoscalingNodeGroups []string, cloudServiceAccountURI string) ([]byte, error) {
|
||||
|
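The fake core above replaces its integer counter with sequential address allocation via net/netip. A self-contained sketch of that allocation pattern (the start address mirrors InitializeStoreIPs; everything else is illustrative):

	package main

	import (
		"fmt"
		"net/netip"
	)

	func main() {
		next := netip.AddrFrom4([4]byte{10, 118, 0, 11}) // first free node IP
		for i := 0; i < 3; i++ {
			ip := next
			next = next.Next() // Next returns the numerically following address
			fmt.Println(ip)    // prints 10.118.0.11, 10.118.0.12, 10.118.0.13
		}
	}
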
@ -80,7 +80,7 @@ func (a *API) JoinCluster(ctx context.Context, in *pubproto.JoinClusterRequest)
		return nil, status.Errorf(codes.FailedPrecondition, "%v", err)
	}

	conn, err := a.dialInsecure(ctx, net.JoinHostPort(a.core.GetCoordinatorVPNIP(), vpnAPIPort))
	conn, err := a.dialInsecure(ctx, net.JoinHostPort(in.CoordinatorVpnIp, vpnAPIPort))
	if err != nil {
		return nil, status.Errorf(codes.Unavailable, "%v", err)
	}
@ -142,7 +142,8 @@ func (a *API) update(ctx context.Context) error {
	ctx, cancel := context.WithTimeout(ctx, deadlineDuration)
	defer cancel()

	conn, err := a.dialInsecure(ctx, net.JoinHostPort(a.core.GetCoordinatorVPNIP(), vpnAPIPort))
	// TODO: replace hardcoded IP
	conn, err := a.dialInsecure(ctx, net.JoinHostPort("10.118.0.1", vpnAPIPort))
	if err != nil {
		return err
	}

@ -21,8 +21,8 @@ import (

func TestActivateAsNode(t *testing.T) {
	someErr := errors.New("failed")
	peer1 := peer.Peer{PublicEndpoint: "192.0.2.11:2000", VPNIP: "192.0.2.21", VPNPubKey: []byte{1, 2, 3}}
	peer2 := peer.Peer{PublicEndpoint: "192.0.2.12:2000", VPNIP: "192.0.2.22", VPNPubKey: []byte{2, 3, 4}}
	peer1 := peer.Peer{PublicIP: "192.0.2.11:2000", VPNIP: "192.0.2.21", VPNPubKey: []byte{1, 2, 3}}
	peer2 := peer.Peer{PublicIP: "192.0.2.12:2000", VPNIP: "192.0.2.22", VPNPubKey: []byte{2, 3, 4}}

	testCases := map[string]struct {
		initialPeers []peer.Peer
@ -90,7 +90,7 @@ func TestActivateAsNode(t *testing.T) {
			vserver := grpc.NewServer()
			vapi := &stubVPNAPI{peers: tc.updatedPeers, getUpdateErr: tc.getUpdateErr}
			vpnproto.RegisterAPIServer(vserver, vapi)
			go vserver.Serve(dialer.GetListener(net.JoinHostPort(core.GetCoordinatorVPNIP(), vpnAPIPort)))
			go vserver.Serve(dialer.GetListener(net.JoinHostPort("10.118.0.1", vpnAPIPort)))
			defer vserver.GracefulStop()

			resp, err := api.ActivateAsNode(context.Background(), &pubproto.ActivateAsNodeRequest{
@ -130,8 +130,8 @@ func TestActivateAsNode(t *testing.T) {
func TestTriggerNodeUpdate(t *testing.T) {
	someErr := errors.New("failed")
	peers := []peer.Peer{
		{PublicEndpoint: "192.0.2.11:2000", VPNIP: "192.0.2.21", VPNPubKey: []byte{1, 2, 3}},
		{PublicEndpoint: "192.0.2.12:2000", VPNIP: "192.0.2.22", VPNPubKey: []byte{2, 3, 4}},
		{PublicIP: "192.0.2.11:2000", VPNIP: "192.0.2.21", VPNPubKey: []byte{1, 2, 3}},
		{PublicIP: "192.0.2.12:2000", VPNIP: "192.0.2.22", VPNPubKey: []byte{2, 3, 4}},
	}

	testCases := map[string]struct {
@ -179,7 +179,7 @@ func TestTriggerNodeUpdate(t *testing.T) {
				getUpdateErr: tc.getUpdateErr,
			}
			vpnproto.RegisterAPIServer(vserver, vapi)
			go vserver.Serve(dialer.GetListener(net.JoinHostPort(core.GetCoordinatorVPNIP(), vpnAPIPort)))
			go vserver.Serve(dialer.GetListener(net.JoinHostPort("10.118.0.1", vpnAPIPort)))
			defer vserver.GracefulStop()

			_, err := api.TriggerNodeUpdate(context.Background(), &pubproto.TriggerNodeUpdateRequest{})
@ -258,10 +258,10 @@ func TestJoinCluster(t *testing.T) {
				getJoinArgsErr: tc.getJoinArgsErr,
			}
			vpnproto.RegisterAPIServer(vserver, vapi)
			go vserver.Serve(dialer.GetListener(net.JoinHostPort(core.GetCoordinatorVPNIP(), vpnAPIPort)))
			go vserver.Serve(dialer.GetListener(net.JoinHostPort("192.0.2.1", vpnAPIPort)))
			defer vserver.GracefulStop()

			_, err := api.JoinCluster(context.Background(), &pubproto.JoinClusterRequest{})
			_, err := api.JoinCluster(context.Background(), &pubproto.JoinClusterRequest{CoordinatorVpnIp: "192.0.2.1"})

			assert.Equal(tc.expectedState, core.state)

File diff suppressed because it is too large

@ -9,8 +9,11 @@ service API {
  rpc ActivateAsCoordinator(ActivateAsCoordinatorRequest) returns (stream ActivateAsCoordinatorResponse);
  rpc ActivateAsNode(ActivateAsNodeRequest) returns (ActivateAsNodeResponse);
  rpc ActivateAdditionalNodes(ActivateAdditionalNodesRequest) returns (stream ActivateAdditionalNodesResponse);
  rpc ActivateAsAdditionalCoordinator(ActivateAsAdditionalCoordinatorRequest) returns (ActivateAsAdditionalCoordinatorResponse);
  rpc ActivateAdditionalCoordinator(ActivateAdditionalCoordinatorRequest) returns (ActivateAdditionalCoordinatorResponse);
  rpc JoinCluster(JoinClusterRequest) returns (JoinClusterResponse);
  rpc TriggerNodeUpdate(TriggerNodeUpdateRequest) returns (TriggerNodeUpdateResponse);
  rpc TriggerCoordinatorUpdate(TriggerCoordinatorUpdateRequest) returns (TriggerCoordinatorUpdateResponse);
  rpc RequestStateDiskKey(RequestStateDiskKeyRequest) returns (RequestStateDiskKeyResponse);
}

@ -23,7 +26,7 @@ message GetStateResponse {

message ActivateAsCoordinatorRequest {
  bytes admin_vpn_pub_key = 1;
  repeated string node_public_endpoints = 2;
  repeated string node_public_ips = 2;
  repeated string autoscaling_node_groups = 3;
  bytes master_secret = 4;
  string kms_uri = 5;
@ -52,14 +55,33 @@ message ActivateAsNodeResponse {
}

message ActivateAdditionalNodesRequest {
  repeated string node_public_endpoints = 1;
  repeated string node_public_ips = 1;
}

message ActivateAdditionalNodesResponse {
  Log log = 1;
}

message ActivateAsAdditionalCoordinatorRequest {
  string assigned_vpn_ip = 1;
  Peer activating_coordinator_data = 2;
  bytes owner_id = 3;
  bytes cluster_id = 4;
}

message ActivateAsAdditionalCoordinatorResponse {
}

message ActivateAdditionalCoordinatorRequest {
  string coordinator_public_ip = 1;
}

message ActivateAdditionalCoordinatorResponse {

}

message JoinClusterRequest {
  string coordinator_vpn_ip = 1;
}

message JoinClusterResponse {
@ -71,6 +93,12 @@ message TriggerNodeUpdateRequest {
message TriggerNodeUpdateResponse {
}

message TriggerCoordinatorUpdateRequest {
}

message TriggerCoordinatorUpdateResponse {
}

message RequestStateDiskKeyRequest {
  string disk_uuid = 1;
}
@ -91,7 +119,7 @@ message Log {
}

message Peer {
  string public_endpoint = 1;
  string public_ip = 1;
  string vpn_ip = 2;
  bytes vpn_pub_key = 3;
  uint32 role = 4;

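protoc generates matching Go stubs for the new RPCs above. A hedged sketch of invoking ActivateAdditionalCoordinator through that generated client; the helper, its parameters, and the insecure transport credentials are placeholders (the real CLI dials through attestation-aware TLS), and it assumes imports of google.golang.org/grpc, google.golang.org/grpc/credentials/insecure, and the generated pubproto package:

	// activateAdditionalCoordinator is a hypothetical helper, not repository code.
	func activateAdditionalCoordinator(ctx context.Context, endpoint, publicIP string) error {
		conn, err := grpc.DialContext(ctx, endpoint,
			grpc.WithTransportCredentials(insecure.NewCredentials())) // placeholder credentials
		if err != nil {
			return err
		}
		defer conn.Close()

		client := pubproto.NewAPIClient(conn)
		// Ask the running coordinator to activate another instance as coordinator.
		_, err = client.ActivateAdditionalCoordinator(ctx, &pubproto.ActivateAdditionalCoordinatorRequest{
			CoordinatorPublicIp: publicIP,
		})
		return err
	}
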
@ -22,8 +22,11 @@ type APIClient interface {
	ActivateAsCoordinator(ctx context.Context, in *ActivateAsCoordinatorRequest, opts ...grpc.CallOption) (API_ActivateAsCoordinatorClient, error)
	ActivateAsNode(ctx context.Context, in *ActivateAsNodeRequest, opts ...grpc.CallOption) (*ActivateAsNodeResponse, error)
	ActivateAdditionalNodes(ctx context.Context, in *ActivateAdditionalNodesRequest, opts ...grpc.CallOption) (API_ActivateAdditionalNodesClient, error)
	ActivateAsAdditionalCoordinator(ctx context.Context, in *ActivateAsAdditionalCoordinatorRequest, opts ...grpc.CallOption) (*ActivateAsAdditionalCoordinatorResponse, error)
	ActivateAdditionalCoordinator(ctx context.Context, in *ActivateAdditionalCoordinatorRequest, opts ...grpc.CallOption) (*ActivateAdditionalCoordinatorResponse, error)
	JoinCluster(ctx context.Context, in *JoinClusterRequest, opts ...grpc.CallOption) (*JoinClusterResponse, error)
	TriggerNodeUpdate(ctx context.Context, in *TriggerNodeUpdateRequest, opts ...grpc.CallOption) (*TriggerNodeUpdateResponse, error)
	TriggerCoordinatorUpdate(ctx context.Context, in *TriggerCoordinatorUpdateRequest, opts ...grpc.CallOption) (*TriggerCoordinatorUpdateResponse, error)
	RequestStateDiskKey(ctx context.Context, in *RequestStateDiskKeyRequest, opts ...grpc.CallOption) (*RequestStateDiskKeyResponse, error)
}

@ -117,6 +120,24 @@ func (x *aPIActivateAdditionalNodesClient) Recv() (*ActivateAdditionalNodesRespo
	return m, nil
}

func (c *aPIClient) ActivateAsAdditionalCoordinator(ctx context.Context, in *ActivateAsAdditionalCoordinatorRequest, opts ...grpc.CallOption) (*ActivateAsAdditionalCoordinatorResponse, error) {
	out := new(ActivateAsAdditionalCoordinatorResponse)
	err := c.cc.Invoke(ctx, "/pubapi.API/ActivateAsAdditionalCoordinator", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *aPIClient) ActivateAdditionalCoordinator(ctx context.Context, in *ActivateAdditionalCoordinatorRequest, opts ...grpc.CallOption) (*ActivateAdditionalCoordinatorResponse, error) {
	out := new(ActivateAdditionalCoordinatorResponse)
	err := c.cc.Invoke(ctx, "/pubapi.API/ActivateAdditionalCoordinator", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *aPIClient) JoinCluster(ctx context.Context, in *JoinClusterRequest, opts ...grpc.CallOption) (*JoinClusterResponse, error) {
	out := new(JoinClusterResponse)
	err := c.cc.Invoke(ctx, "/pubapi.API/JoinCluster", in, out, opts...)
@ -135,6 +156,15 @@ func (c *aPIClient) TriggerNodeUpdate(ctx context.Context, in *TriggerNodeUpdate
	return out, nil
}

func (c *aPIClient) TriggerCoordinatorUpdate(ctx context.Context, in *TriggerCoordinatorUpdateRequest, opts ...grpc.CallOption) (*TriggerCoordinatorUpdateResponse, error) {
	out := new(TriggerCoordinatorUpdateResponse)
	err := c.cc.Invoke(ctx, "/pubapi.API/TriggerCoordinatorUpdate", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *aPIClient) RequestStateDiskKey(ctx context.Context, in *RequestStateDiskKeyRequest, opts ...grpc.CallOption) (*RequestStateDiskKeyResponse, error) {
	out := new(RequestStateDiskKeyResponse)
	err := c.cc.Invoke(ctx, "/pubapi.API/RequestStateDiskKey", in, out, opts...)
@ -152,8 +182,11 @@ type APIServer interface {
	ActivateAsCoordinator(*ActivateAsCoordinatorRequest, API_ActivateAsCoordinatorServer) error
	ActivateAsNode(context.Context, *ActivateAsNodeRequest) (*ActivateAsNodeResponse, error)
	ActivateAdditionalNodes(*ActivateAdditionalNodesRequest, API_ActivateAdditionalNodesServer) error
	ActivateAsAdditionalCoordinator(context.Context, *ActivateAsAdditionalCoordinatorRequest) (*ActivateAsAdditionalCoordinatorResponse, error)
	ActivateAdditionalCoordinator(context.Context, *ActivateAdditionalCoordinatorRequest) (*ActivateAdditionalCoordinatorResponse, error)
	JoinCluster(context.Context, *JoinClusterRequest) (*JoinClusterResponse, error)
	TriggerNodeUpdate(context.Context, *TriggerNodeUpdateRequest) (*TriggerNodeUpdateResponse, error)
	TriggerCoordinatorUpdate(context.Context, *TriggerCoordinatorUpdateRequest) (*TriggerCoordinatorUpdateResponse, error)
	RequestStateDiskKey(context.Context, *RequestStateDiskKeyRequest) (*RequestStateDiskKeyResponse, error)
	mustEmbedUnimplementedAPIServer()
}
@ -174,12 +207,21 @@ func (UnimplementedAPIServer) ActivateAsNode(context.Context, *ActivateAsNodeReq
func (UnimplementedAPIServer) ActivateAdditionalNodes(*ActivateAdditionalNodesRequest, API_ActivateAdditionalNodesServer) error {
	return status.Errorf(codes.Unimplemented, "method ActivateAdditionalNodes not implemented")
}
func (UnimplementedAPIServer) ActivateAsAdditionalCoordinator(context.Context, *ActivateAsAdditionalCoordinatorRequest) (*ActivateAsAdditionalCoordinatorResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method ActivateAsAdditionalCoordinator not implemented")
}
func (UnimplementedAPIServer) ActivateAdditionalCoordinator(context.Context, *ActivateAdditionalCoordinatorRequest) (*ActivateAdditionalCoordinatorResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method ActivateAdditionalCoordinator not implemented")
}
func (UnimplementedAPIServer) JoinCluster(context.Context, *JoinClusterRequest) (*JoinClusterResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method JoinCluster not implemented")
}
func (UnimplementedAPIServer) TriggerNodeUpdate(context.Context, *TriggerNodeUpdateRequest) (*TriggerNodeUpdateResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method TriggerNodeUpdate not implemented")
}
func (UnimplementedAPIServer) TriggerCoordinatorUpdate(context.Context, *TriggerCoordinatorUpdateRequest) (*TriggerCoordinatorUpdateResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method TriggerCoordinatorUpdate not implemented")
}
func (UnimplementedAPIServer) RequestStateDiskKey(context.Context, *RequestStateDiskKeyRequest) (*RequestStateDiskKeyResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method RequestStateDiskKey not implemented")
}
@ -274,6 +316,42 @@ func (x *aPIActivateAdditionalNodesServer) Send(m *ActivateAdditionalNodesRespon
	return x.ServerStream.SendMsg(m)
}

func _API_ActivateAsAdditionalCoordinator_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(ActivateAsAdditionalCoordinatorRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(APIServer).ActivateAsAdditionalCoordinator(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/pubapi.API/ActivateAsAdditionalCoordinator",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(APIServer).ActivateAsAdditionalCoordinator(ctx, req.(*ActivateAsAdditionalCoordinatorRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _API_ActivateAdditionalCoordinator_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(ActivateAdditionalCoordinatorRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(APIServer).ActivateAdditionalCoordinator(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/pubapi.API/ActivateAdditionalCoordinator",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(APIServer).ActivateAdditionalCoordinator(ctx, req.(*ActivateAdditionalCoordinatorRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _API_JoinCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(JoinClusterRequest)
	if err := dec(in); err != nil {
@ -310,6 +388,24 @@ func _API_TriggerNodeUpdate_Handler(srv interface{}, ctx context.Context, dec fu
	return interceptor(ctx, in, info, handler)
}

func _API_TriggerCoordinatorUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(TriggerCoordinatorUpdateRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(APIServer).TriggerCoordinatorUpdate(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/pubapi.API/TriggerCoordinatorUpdate",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(APIServer).TriggerCoordinatorUpdate(ctx, req.(*TriggerCoordinatorUpdateRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _API_RequestStateDiskKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(RequestStateDiskKeyRequest)
	if err := dec(in); err != nil {
@ -343,6 +439,14 @@ var API_ServiceDesc = grpc.ServiceDesc{
			MethodName: "ActivateAsNode",
			Handler:    _API_ActivateAsNode_Handler,
		},
		{
			MethodName: "ActivateAsAdditionalCoordinator",
			Handler:    _API_ActivateAsAdditionalCoordinator_Handler,
		},
		{
			MethodName: "ActivateAdditionalCoordinator",
			Handler:    _API_ActivateAdditionalCoordinator_Handler,
		},
		{
			MethodName: "JoinCluster",
			Handler:    _API_JoinCluster_Handler,
@ -351,6 +455,10 @@ var API_ServiceDesc = grpc.ServiceDesc{
			MethodName: "TriggerNodeUpdate",
			Handler:    _API_TriggerNodeUpdate_Handler,
		},
		{
			MethodName: "TriggerCoordinatorUpdate",
			Handler:    _API_TriggerCoordinatorUpdate_Handler,
		},
		{
			MethodName: "RequestStateDiskKey",
			Handler:    _API_RequestStateDiskKey_Handler,

@ -14,6 +14,7 @@ const (
	Unknown Role = iota
	Coordinator
	Node
	Admin
)

// MarshalJSON marshals the Role to JSON string.
@ -30,9 +31,10 @@ func (r *Role) UnmarshalJSON(b []byte) error {
	switch strings.ToLower(roleString) {
	case "coordinator":
		*r = Coordinator

	case "node":
		*r = Node
	case "admin":
		*r = Admin
	default:
		*r = Unknown
	}

@ -11,11 +11,12 @@ func _() {
	_ = x[Unknown-0]
	_ = x[Coordinator-1]
	_ = x[Node-2]
	_ = x[Admin-3]
}

const _Role_name = "UnknownCoordinatorNode"
const _Role_name = "UnknownCoordinatorNodeAdmin"

var _Role_index = [...]uint8{0, 7, 18, 22}
var _Role_index = [...]uint8{0, 7, 18, 22, 27}

func (i Role) String() string {
	if i >= Role(len(_Role_index)-1) {

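The regenerated stringer data encodes all role names in one concatenated string and keeps their start offsets in _Role_index; String slices between adjacent offsets. A worked example of the lookup for the new Admin value:

	const _Role_name = "UnknownCoordinatorNodeAdmin"
	var _Role_index = [...]uint8{0, 7, 18, 22, 27}

	// Admin == 3, so _Role_name[_Role_index[3]:_Role_index[4]] == _Role_name[22:27] == "Admin".
	func roleName(i int) string {
		return _Role_name[_Role_index[i]:_Role_index[i+1]]
	}
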
@ -21,6 +21,10 @@ func TestMarshal(t *testing.T) {
			role:         Node,
			jsonExpected: `"Node"`,
		},
		"admin role": {
			role:         Admin,
			jsonExpected: `"Admin"`,
		},
		"unknown role": {
			role:         Unknown,
			jsonExpected: `"Unknown"`,
@ -66,6 +70,14 @@ func TestUnmarshal(t *testing.T) {
			json:         `"node"`,
			expectedRole: Node,
		},
		"Admin can be unmarshaled": {
			json:         `"Admin"`,
			expectedRole: Admin,
		},
		"lowercase admin can be unmarshaled": {
			json:         `"admin"`,
			expectedRole: Admin,
		},
		"other strings unmarshal to the unknown role": {
			json:         `"anything"`,
			expectedRole: Unknown,

@ -28,7 +28,6 @@ const (
	keyVPNPubKey = "vpnKey"
	keyKEKID     = "kekID"
	prefixFreeCoordinatorIPs = "freeCoordinatorVPNIPs"
	prefixAdminLocation      = "externalAdminsData"
	prefixPeerLocation       = "peerPrefix"
	prefixFreeNodeIPs        = "freeNodeVPNIPs"
)
@ -98,17 +97,6 @@ func (s StoreWrapper) RemovePeer(peer peer.Peer) error {
	return s.Store.Delete(prefixPeerLocation + peer.VPNIP)
}

// GetPeer returns a peer requested by the given VPN IP address.
func (s StoreWrapper) GetPeer(vpnIP string) (peer.Peer, error) {
	bytePeer, err := s.Store.Get(prefixPeerLocation + vpnIP)
	if err != nil {
		return peer.Peer{}, err
	}
	var peer peer.Peer
	err = json.Unmarshal(bytePeer, &peer)
	return peer, err
}

// GetPeers returns all peers in the store.
func (s StoreWrapper) GetPeers() ([]peer.Peer, error) {
	return s.getPeersByPrefix(prefixPeerLocation)
@ -169,7 +157,7 @@ func (s StoreWrapper) UpdatePeers(peers []peer.Peer) (added, removed []peer.Peer
		}

		if updPeer, ok := updatedPeers[storedPeer.VPNIP]; ok {
			if updPeer.PublicEndpoint != storedPeer.PublicEndpoint || !bytes.Equal(updPeer.VPNPubKey, storedPeer.VPNPubKey) {
			if updPeer.PublicIP != storedPeer.PublicIP || !bytes.Equal(updPeer.VPNPubKey, storedPeer.VPNPubKey) {
				// stored peer must be updated, so mark for addition AND removal
				added = append(added, updPeer)
				removed = append(removed, storedPeer)
@ -205,37 +193,6 @@ func (s StoreWrapper) UpdatePeers(peers []peer.Peer) (added, removed []peer.Peer
	return added, removed, nil
}

// PutAdmin puts a single admin in the store, with a unique key derived form the VPNIP.
func (s StoreWrapper) PutAdmin(peer peer.AdminData) error {
	jsonPeer, err := json.Marshal(peer)
	if err != nil {
		return err
	}
	return s.Store.Put(prefixAdminLocation+peer.VPNIP, jsonPeer)
}

// GetAdmin gets a single admin from the store.
// TODO: extend if we want to have multiple admins.
func (s StoreWrapper) GetAdmin() (peer.AdminData, error) {
	iter, err := s.Store.Iterator(prefixAdminLocation)
	if err != nil {
		return peer.AdminData{}, err
	}
	key, err := iter.GetNext()
	if err != nil {
		return peer.AdminData{}, err
	}
	value, err := s.Store.Get(key)
	if err != nil {
		return peer.AdminData{}, err
	}
	var adminData peer.AdminData
	if err := json.Unmarshal(value, &adminData); err != nil {
		return peer.AdminData{}, err
	}
	return adminData, nil
}

func (s StoreWrapper) getPeersByPrefix(prefix string) ([]peer.Peer, error) {
	peerKeys, err := s.Store.Iterator(prefix)
	if err != nil {

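With the comparison above, a stored peer whose PublicIP or VPN key changed is reported both in added and in removed, so callers can apply the update as a delete-then-add. A sketch of that contract with hypothetical values (someKey and stwrapper are placeholders, not repository fixtures):

	// Assume `stored` was persisted earlier via PutPeer.
	stored := peer.Peer{VPNIP: "10.118.0.11", PublicIP: "192.0.2.11", VPNPubKey: someKey}
	moved := stored
	moved.PublicIP = "192.0.2.99" // same VPN IP, new public IP

	added, removed, err := stwrapper.UpdatePeers([]peer.Peer{moved})
	// err == nil; added contains moved, removed contains stored:
	// the caller drops the stale WireGuard peer and installs the replacement.
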
@ -123,22 +123,22 @@ func TestStoreWrapperPeerInterface(t *testing.T) {
	internalIP := "10.118.2.0"

	validPeer := peer.Peer{
		PublicEndpoint: ip,
		VPNPubKey:      key[:],
		VPNIP:          internalIP,
		PublicIP:  ip,
		VPNPubKey: key[:],
		VPNIP:     internalIP,
	}
	require.NoError(stwrapper.PutPeer(validPeer))
	data, err := stwrapper.GetPeers()
	require.NoError(err)
	require.Equal(1, len(data))
	assert.Equal(ip, data[0].PublicEndpoint)
	assert.Equal(ip, data[0].PublicIP)
	assert.Equal(key[:], data[0].VPNPubKey)
	assert.Equal(internalIP, data[0].VPNIP)

	invalidPeer := peer.Peer{
		PublicEndpoint: ip,
		VPNPubKey:      key[:],
		VPNIP:          "",
		PublicIP:  ip,
		VPNPubKey: key[:],
		VPNIP:     "",
	}
	assert.Error(stwrapper.PutPeer(invalidPeer))
}

@ -26,9 +26,9 @@ func TestMain(m *testing.M) {
func TestGetUpdate(t *testing.T) {
	someErr := errors.New("failed")
	clientIP := &net.IPAddr{IP: net.ParseIP("192.0.2.1")}
	peer1 := peer.Peer{PublicEndpoint: "192.0.2.11:2000", VPNIP: "192.0.2.21", VPNPubKey: []byte{1, 2, 3}}
	peer2 := peer.Peer{PublicEndpoint: "192.0.2.12:2000", VPNIP: "192.0.2.22", VPNPubKey: []byte{2, 3, 4}}
	peer3 := peer.Peer{PublicEndpoint: "192.0.2.13:2000", VPNIP: "192.0.2.23", VPNPubKey: []byte{3, 4, 5}}
	peer1 := peer.Peer{PublicIP: "192.0.2.11", VPNIP: "192.0.2.21", VPNPubKey: []byte{1, 2, 3}}
	peer2 := peer.Peer{PublicIP: "192.0.2.12", VPNIP: "192.0.2.22", VPNPubKey: []byte{2, 3, 4}}
	peer3 := peer.Peer{PublicIP: "192.0.2.13", VPNIP: "192.0.2.23", VPNPubKey: []byte{3, 4, 5}}

	testCases := map[string]struct {
		clientAddr net.Addr
@ -95,7 +95,7 @@ func TestGetUpdate(t *testing.T) {
			require.Len(resp.Peers, len(tc.peers))
			for i, actual := range resp.Peers {
				expected := tc.peers[i]
				assert.EqualValues(expected.PublicEndpoint, actual.PublicEndpoint)
				assert.EqualValues(expected.PublicIP, actual.PublicIp)
				assert.EqualValues(expected.VPNIP, actual.VpnIp)
				assert.Equal(expected.VPNPubKey, actual.VpnPubKey)
			}

@ -228,10 +228,10 @@ type Peer struct {
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	PublicEndpoint string `protobuf:"bytes,1,opt,name=public_endpoint,json=publicEndpoint,proto3" json:"public_endpoint,omitempty"`
	VpnIp          string `protobuf:"bytes,2,opt,name=vpn_ip,json=vpnIp,proto3" json:"vpn_ip,omitempty"`
	VpnPubKey      []byte `protobuf:"bytes,3,opt,name=vpn_pub_key,json=vpnPubKey,proto3" json:"vpn_pub_key,omitempty"`
	Role           uint32 `protobuf:"varint,4,opt,name=role,proto3" json:"role,omitempty"`
	PublicIp  string `protobuf:"bytes,1,opt,name=public_ip,json=publicIp,proto3" json:"public_ip,omitempty"`
	VpnIp     string `protobuf:"bytes,2,opt,name=vpn_ip,json=vpnIp,proto3" json:"vpn_ip,omitempty"`
	VpnPubKey []byte `protobuf:"bytes,3,opt,name=vpn_pub_key,json=vpnPubKey,proto3" json:"vpn_pub_key,omitempty"`
	Role      uint32 `protobuf:"varint,4,opt,name=role,proto3" json:"role,omitempty"`
}

func (x *Peer) Reset() {
@ -266,9 +266,9 @@ func (*Peer) Descriptor() ([]byte, []int) {
	return file_vpnapi_proto_rawDescGZIP(), []int{4}
}

func (x *Peer) GetPublicEndpoint() string {
func (x *Peer) GetPublicIp() string {
	if x != nil {
		return x.PublicEndpoint
		return x.PublicIp
	}
	return ""
}

@ -422,41 +422,40 @@ var file_vpnapi_proto_rawDesc = []byte{
	0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x63, 0x61, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x68,
	0x61, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x69, 0x73, 0x63, 0x6f,
	0x76, 0x65, 0x72, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x43, 0x61, 0x43, 0x65, 0x72, 0x74, 0x48,
	0x61, 0x73, 0x68, 0x22, 0x7a, 0x0a, 0x04, 0x50, 0x65, 0x65, 0x72, 0x12, 0x27, 0x0a, 0x0f, 0x70,
	0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x01,
	0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x70,
	0x6f, 0x69, 0x6e, 0x74, 0x12, 0x15, 0x0a, 0x06, 0x76, 0x70, 0x6e, 0x5f, 0x69, 0x70, 0x18, 0x02,
	0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x70, 0x6e, 0x49, 0x70, 0x12, 0x1e, 0x0a, 0x0b, 0x76,
	0x70, 0x6e, 0x5f, 0x70, 0x75, 0x62, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c,
	0x52, 0x09, 0x76, 0x70, 0x6e, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x72,
	0x6f, 0x6c, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x22,
	0x4b, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71,
	0x75, 0x65, 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6b, 0x65, 0x79,
	0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x61, 0x74, 0x61, 0x4b,
	0x65, 0x79, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x02,
	0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x22, 0x2f, 0x0a, 0x12,
	0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
	0x73, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01,
	0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x32, 0xdd, 0x01,
	0x0a, 0x03, 0x41, 0x50, 0x49, 0x12, 0x40, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x55, 0x70, 0x64, 0x61,
	0x74, 0x65, 0x12, 0x18, 0x2e, 0x76, 0x70, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x65, 0x74, 0x55,
	0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x76,
	0x70, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x65, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52,
	0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4f, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x4b, 0x38,
	0x73, 0x4a, 0x6f, 0x69, 0x6e, 0x41, 0x72, 0x67, 0x73, 0x12, 0x1d, 0x2e, 0x76, 0x70, 0x6e, 0x61,
	0x70, 0x69, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x38, 0x73, 0x4a, 0x6f, 0x69, 0x6e, 0x41, 0x72, 0x67,
	0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x70, 0x6e, 0x61, 0x70,
	0x69, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x38, 0x73, 0x4a, 0x6f, 0x69, 0x6e, 0x41, 0x72, 0x67, 0x73,
	0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x43, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x44,
	0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x12, 0x19, 0x2e, 0x76, 0x70, 0x6e, 0x61, 0x70, 0x69, 0x2e,
	0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
	0x74, 0x1a, 0x1a, 0x2e, 0x76, 0x70, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x61,
	0x74, 0x61, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x42, 0x5a,
	0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x64, 0x67, 0x65,
	0x6c, 0x65, 0x73, 0x73, 0x73, 0x79, 0x73, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x65, 0x6c, 0x6c,
	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x74, 0x6f,
	0x72, 0x2f, 0x76, 0x70, 0x6e, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x70, 0x6e, 0x70, 0x72, 0x6f, 0x74,
	0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
	0x61, 0x73, 0x68, 0x22, 0x6e, 0x0a, 0x04, 0x50, 0x65, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x70,
	0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x69, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
	0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x49, 0x70, 0x12, 0x15, 0x0a, 0x06, 0x76, 0x70, 0x6e, 0x5f,
	0x69, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x70, 0x6e, 0x49, 0x70, 0x12,
	0x1e, 0x0a, 0x0b, 0x76, 0x70, 0x6e, 0x5f, 0x70, 0x75, 0x62, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03,
	0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x76, 0x70, 0x6e, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x12,
	0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x72,
	0x6f, 0x6c, 0x65, 0x22, 0x4b, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x4b, 0x65,
	0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61,
	0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64,
	0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67,
	0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68,
	0x22, 0x2f, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x52, 0x65,
	0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6b,
	0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65,
	0x79, 0x32, 0xdd, 0x01, 0x0a, 0x03, 0x41, 0x50, 0x49, 0x12, 0x40, 0x0a, 0x09, 0x47, 0x65, 0x74,
	0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x18, 0x2e, 0x76, 0x70, 0x6e, 0x61, 0x70, 0x69, 0x2e,
	0x47, 0x65, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
	0x1a, 0x19, 0x2e, 0x76, 0x70, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x65, 0x74, 0x55, 0x70, 0x64,
	0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4f, 0x0a, 0x0e, 0x47,
	0x65, 0x74, 0x4b, 0x38, 0x73, 0x4a, 0x6f, 0x69, 0x6e, 0x41, 0x72, 0x67, 0x73, 0x12, 0x1d, 0x2e,
	0x76, 0x70, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x38, 0x73, 0x4a, 0x6f, 0x69,
	0x6e, 0x41, 0x72, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76,
	0x70, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x38, 0x73, 0x4a, 0x6f, 0x69, 0x6e,
	0x41, 0x72, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x43, 0x0a, 0x0a,
	0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x12, 0x19, 0x2e, 0x76, 0x70, 0x6e,
	0x61, 0x70, 0x69, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x52, 0x65,
	0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x76, 0x70, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x47,
	0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
	0x65, 0x42, 0x42, 0x5a, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
	0x65, 0x64, 0x67, 0x65, 0x6c, 0x65, 0x73, 0x73, 0x73, 0x79, 0x73, 0x2f, 0x63, 0x6f, 0x6e, 0x73,
	0x74, 0x65, 0x6c, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6f, 0x72, 0x64, 0x69,
	0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2f, 0x76, 0x70, 0x6e, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x70, 0x6e,
	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}

var (

|
||||
}
|
||||
|
||||
message Peer {
|
||||
string public_endpoint = 1;
|
||||
string public_ip = 1;
|
||||
string vpn_ip = 2;
|
||||
bytes vpn_pub_key = 3;
|
||||
uint32 role = 4;
|
||||
|
@ -22,8 +22,7 @@ const (
|
||||
)
|
||||
|
||||
type Wireguard struct {
|
||||
client wgClient
|
||||
getInterfaceIP func(string) (string, error)
|
||||
client wgClient
|
||||
}
|
||||
|
||||
func New() (*Wireguard, error) {
|
||||
@ -31,7 +30,7 @@ func New() (*Wireguard, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Wireguard{client: client, getInterfaceIP: util.GetInterfaceIP}, nil
|
||||
return &Wireguard{client: client}, nil
|
||||
}
|
||||
|
||||
func (w *Wireguard) Setup(privKey []byte) ([]byte, error) {
|
||||
@ -73,7 +72,7 @@ func (w *Wireguard) GetPublicKey(privKey []byte) ([]byte, error) {
|
||||
}
|
||||
|
||||
func (w *Wireguard) GetInterfaceIP() (string, error) {
|
||||
return w.getInterfaceIP(netInterface)
|
||||
return util.GetInterfaceIP(netInterface)
|
||||
}
|
||||
|
||||
// SetInterfaceIP sets the ip interface ip.
|
||||
@ -146,11 +145,7 @@ func prettyWgError(err error) error {
|
||||
}
|
||||
|
||||
func (w *Wireguard) UpdatePeers(peers []peer.Peer) error {
|
||||
ownVPNIP, err := w.getInterfaceIP(netInterface)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to obtain vpn ip: %w", err)
|
||||
}
|
||||
wgPeers, err := transformToWgpeer(peers, ownVPNIP)
|
||||
wgPeers, err := transformToWgpeer(peers)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to transform peers to wireguard-peers: %w", err)
|
||||
}
|
||||
@ -166,13 +161,9 @@ func (w *Wireguard) UpdatePeers(peers []peer.Peer) error {
|
||||
}
|
||||
var added []wgtypes.Peer
|
||||
var removed []wgtypes.Peer
|
||||
var updated []wgtypes.Peer
|
||||
|
||||
for _, interfacePeer := range deviceData.Peers {
|
||||
if updPeer, ok := storePeers[interfacePeer.AllowedIPs[0].String()]; ok {
|
||||
if updPeer.Endpoint.String() != interfacePeer.Endpoint.String() {
|
||||
updated = append(updated, updPeer)
|
||||
}
|
||||
if !bytes.Equal(updPeer.PublicKey[:], interfacePeer.PublicKey[:]) {
|
||||
added = append(added, updPeer)
|
||||
removed = append(removed, interfacePeer)
|
||||
@ -196,14 +187,6 @@ func (w *Wireguard) UpdatePeers(peers []peer.Peer) error {
|
||||
Remove: true,
|
||||
})
|
||||
}
|
||||
for _, peer := range updated {
|
||||
newPeerConfig = append(newPeerConfig, wgtypes.PeerConfig{
|
||||
PublicKey: peer.PublicKey,
|
||||
Remove: false,
|
||||
UpdateOnly: true,
|
||||
Endpoint: peer.Endpoint,
|
||||
})
|
||||
}
|
||||
for _, peer := range added {
|
||||
newPeerConfig = append(newPeerConfig, wgtypes.PeerConfig{
|
||||
PublicKey: peer.PublicKey,
|
||||
@ -236,12 +219,9 @@ type wgClient interface {
|
||||
ConfigureDevice(name string, cfg wgtypes.Config) error
|
||||
}
|
||||
|
||||
func transformToWgpeer(corePeers []peer.Peer, excludedIP string) ([]wgtypes.Peer, error) {
|
||||
func transformToWgpeer(corePeers []peer.Peer) ([]wgtypes.Peer, error) {
|
||||
var wgPeers []wgtypes.Peer
|
||||
for _, peer := range corePeers {
|
||||
if peer.VPNIP == excludedIP {
|
||||
continue
|
||||
}
|
||||
key, err := wgtypes.NewKey(peer.VPNPubKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -250,13 +230,8 @@ func transformToWgpeer(corePeers []peer.Peer, excludedIP string) ([]wgtypes.Peer
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
publicIP, _, err := net.SplitHostPort(peer.PublicEndpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var endpoint *net.UDPAddr
|
||||
if ip := net.ParseIP(publicIP); ip != nil {
|
||||
if ip := net.ParseIP(peer.PublicIP); ip != nil {
|
||||
endpoint = &net.UDPAddr{IP: ip, Port: port}
|
||||
}
|
||||
wgPeers = append(wgPeers, wgtypes.Peer{
|
||||
|
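Because Peer.PublicIP now carries a bare address, transformToWgpeer no longer needs net.SplitHostPort; the WireGuard endpoint is built from the IP plus the package's fixed port constant. A minimal sketch of that construction (p stands for a peer.Peer, and 51820 stands in for the package's port constant):

	var endpoint *net.UDPAddr
	if ip := net.ParseIP(p.PublicIP); ip != nil { // nil for an empty or malformed PublicIP
		endpoint = &net.UDPAddr{IP: ip, Port: 51820}
	}
	// endpoint stays nil for peers without a public IP (such as the admin peer),
	// leaving that peer's WireGuard endpoint unset rather than overwriting it.
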
@ -15,20 +15,20 @@ func TestUpdatePeer(t *testing.T) {

	firstKey, err := wgtypes.GenerateKey()
	requirePre.NoError(err)
	peer1 := peer.Peer{PublicEndpoint: "192.0.2.11:2000", VPNIP: "192.0.2.21", VPNPubKey: firstKey[:]}
	peer1 := peer.Peer{PublicIP: "192.0.2.11", VPNIP: "192.0.2.21", VPNPubKey: firstKey[:]}
	firstKeyUpd, err := wgtypes.GenerateKey()
	requirePre.NoError(err)
	peer1KeyUpd := peer.Peer{PublicEndpoint: "192.0.2.11:2000", VPNIP: "192.0.2.21", VPNPubKey: firstKeyUpd[:]}
	peer1EndpUpd := peer.Peer{PublicEndpoint: "192.0.2.110:2000", VPNIP: "192.0.2.21", VPNPubKey: firstKey[:]}
	peer1KeyUpd := peer.Peer{PublicIP: "192.0.2.11", VPNIP: "192.0.2.21", VPNPubKey: firstKeyUpd[:]}
	secondKey, err := wgtypes.GenerateKey()
	requirePre.NoError(err)
	peer2 := peer.Peer{PublicEndpoint: "192.0.2.12:2000", VPNIP: "192.0.2.22", VPNPubKey: secondKey[:]}
	peer2 := peer.Peer{PublicIP: "192.0.2.12", VPNIP: "192.0.2.22", VPNPubKey: secondKey[:]}
	thirdKey, err := wgtypes.GenerateKey()
	requirePre.NoError(err)
	peer3 := peer.Peer{PublicEndpoint: "192.0.2.13:2000", VPNIP: "192.0.2.23", VPNPubKey: thirdKey[:]}
	peer3 := peer.Peer{PublicIP: "192.0.2.13", VPNIP: "192.0.2.23", VPNPubKey: thirdKey[:]}
	fourthKey, err := wgtypes.GenerateKey()
	requirePre.NoError(err)
	peerSelf := peer.Peer{PublicEndpoint: "192.0.2.10:2000", VPNIP: "192.0.2.20", VPNPubKey: fourthKey[:]}
	peerAdmin := peer.Peer{PublicIP: "192.0.2.10", VPNIP: "192.0.2.25", VPNPubKey: fourthKey[:]}
	peerAdminNoEndp := peer.Peer{VPNIP: "192.0.2.25", VPNPubKey: fourthKey[:]}

	checkError := func(peers []wgtypes.Peer, err error) []wgtypes.Peer {
		requirePre.NoError(err)
@ -38,37 +38,33 @@ func TestUpdatePeer(t *testing.T) {
	testCases := map[string]struct {
		storePeers       []peer.Peer
		vpnPeers         []wgtypes.Peer
		excludedIP       map[string]struct{}
		expectErr        bool
		expectedVPNPeers []wgtypes.Peer
	}{
		"basic": {
			storePeers:       []peer.Peer{peer1, peer3},
			vpnPeers:         checkError(transformToWgpeer([]peer.Peer{peer1, peer2}, "")),
			expectedVPNPeers: checkError(transformToWgpeer([]peer.Peer{peer1, peer3}, "")),
			vpnPeers:         checkError(transformToWgpeer([]peer.Peer{peer1, peer2})),
			expectedVPNPeers: checkError(transformToWgpeer([]peer.Peer{peer1, peer3})),
		},
		"previously empty": {
			storePeers:       []peer.Peer{peer1, peer2},
			expectedVPNPeers: checkError(transformToWgpeer([]peer.Peer{peer1, peer2}, "")),
			expectedVPNPeers: checkError(transformToWgpeer([]peer.Peer{peer1, peer2})),
		},
		"no changes": {
			storePeers:       []peer.Peer{peer1, peer2},
			vpnPeers:         checkError(transformToWgpeer([]peer.Peer{peer1, peer2}, "")),
			expectedVPNPeers: checkError(transformToWgpeer([]peer.Peer{peer1, peer2}, "")),
			vpnPeers:         checkError(transformToWgpeer([]peer.Peer{peer1, peer2})),
			expectedVPNPeers: checkError(transformToWgpeer([]peer.Peer{peer1, peer2})),
		},
		"key update": {
			storePeers:       []peer.Peer{peer1KeyUpd, peer3},
			vpnPeers:         checkError(transformToWgpeer([]peer.Peer{peer1, peer2}, "")),
			expectedVPNPeers: checkError(transformToWgpeer([]peer.Peer{peer1KeyUpd, peer3}, "")),
			vpnPeers:         checkError(transformToWgpeer([]peer.Peer{peer1, peer2})),
			expectedVPNPeers: checkError(transformToWgpeer([]peer.Peer{peer1KeyUpd, peer3})),
		},
		"public endpoint update": {
			storePeers:       []peer.Peer{peer1EndpUpd, peer3},
			vpnPeers:         checkError(transformToWgpeer([]peer.Peer{peer1, peer2}, "")),
			expectedVPNPeers: checkError(transformToWgpeer([]peer.Peer{peer1EndpUpd, peer3}, "")),
		},
		"dont add self": {
			storePeers:       []peer.Peer{peerSelf, peer3},
			vpnPeers:         checkError(transformToWgpeer([]peer.Peer{peer2, peer3}, "")),
			expectedVPNPeers: checkError(transformToWgpeer([]peer.Peer{peer3}, "")),
		"not update Endpoint changes": {
			storePeers:       []peer.Peer{peerAdminNoEndp, peer3},
			vpnPeers:         checkError(transformToWgpeer([]peer.Peer{peerAdmin, peer3})),
			expectedVPNPeers: checkError(transformToWgpeer([]peer.Peer{peerAdmin, peer3})),
		},
	}

@ -79,9 +75,7 @@ func TestUpdatePeer(t *testing.T) {

			fakewg := fakewgClient{}
			fakewg.devices = make(map[string]*wgtypes.Device)
			wg := Wireguard{client: &fakewg, getInterfaceIP: func(s string) (string, error) {
				return "192.0.2.20", nil
			}}
			wg := Wireguard{client: &fakewg}

			fakewg.devices[netInterface] = &wgtypes.Device{Peers: tc.vpnPeers}
