coordinator-core: add multi coordinator Kubernetes integration (#39)

Signed-off-by: Benedict Schlueter <bs@edgeless.systems>
Benedict Schlueter 2022-04-25 17:26:17 +02:00 committed by Benedict Schlüter
parent 0ac9617dac
commit 86178df205
19 changed files with 359 additions and 154 deletions


@@ -2,15 +2,18 @@ package pubapi
import (
"context"
"fmt"
"net"
"github.com/edgelesssys/constellation/coordinator/peer"
"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
"github.com/edgelesssys/constellation/coordinator/role"
"github.com/edgelesssys/constellation/coordinator/state"
"github.com/edgelesssys/constellation/coordinator/vpnapi/vpnproto"
"go.uber.org/zap"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
)
// ActivateAsAdditionalCoordinator is the RPC call to activate subsequent coordinators.
@@ -58,7 +61,21 @@ func (a *API) ActivateAsAdditionalCoordinator(ctx context.Context, in *pubproto.
}
}()
// TODO: kubernetes information and join
a.logger.Info("retrieving k8s join information ")
joinArgs, certKey, err := a.getk8SCoordinatorJoinArgs(ctx, in.ActivatingCoordinatorData.VpnIp, vpnAPIPort)
if err != nil {
return nil, status.Errorf(codes.Internal, "error in getk8sJoinArgs: %v", err)
}
// Before we join the cluster, we need to be able to communicate with ALL other control planes.
err = a.core.UpdatePeers(peer.FromPubProto(in.Peers))
if err != nil {
return nil, status.Errorf(codes.Internal, "add peers to vpn: %v", err)
}
a.logger.Info("about to join the k8s cluster")
err = a.core.JoinCluster(joinArgs, certKey, role.Coordinator)
if err != nil {
return nil, status.Errorf(codes.Internal, "%v", err)
}
// ATTENTION: STORE HAS TO BE EMPTY (NO OVERLAPPING KEYS) WHEN THIS FUNCTION IS CALLED
if err := a.core.SwitchToPersistentStore(); err != nil {
@@ -118,7 +135,6 @@ func (a *API) ActivateAsAdditionalCoordinator(ctx context.Context, in *pubproto.
if err != nil {
return nil, status.Errorf(codes.Internal, "get peers from store: %v", err)
}
a.logger.Info("", zap.Any("peers", peers))
for _, p := range peers {
if p.Role == role.Node {
if err := a.triggerNodeUpdate(p.PublicIP); err != nil {
@@ -126,7 +142,6 @@ func (a *API) ActivateAsAdditionalCoordinator(ctx context.Context, in *pubproto.
}
}
if p.Role == role.Coordinator && p.VPNIP != thisPeer.VPNIP {
a.logger.Info("update coordinator", zap.String("coordinator vpnIP", p.VPNIP))
if err := a.triggerCoordinatorUpdate(context.TODO(), p.PublicIP); err != nil {
a.logger.Error("triggerCoordinatorUpdate failed", zap.Error(err), zap.String("endpoint", p.PublicIP), zap.String("vpnip", p.VPNIP))
}
@@ -137,51 +152,90 @@ func (a *API) ActivateAsAdditionalCoordinator(ctx context.Context, in *pubproto.
}
func (a *API) ActivateAdditionalCoordinator(ctx context.Context, in *pubproto.ActivateAdditionalCoordinatorRequest) (*pubproto.ActivateAdditionalCoordinatorResponse, error) {
ctx, cancel := context.WithTimeout(ctx, deadlineDuration)
defer cancel()
if err := a.core.RequireState(state.ActivatingNodes); err != nil {
return nil, status.Errorf(codes.FailedPrecondition, "coordinator is not in required state: %v", err)
}
assignedVPNIP, err := a.core.GetNextCoordinatorIP()
if err != nil {
return nil, status.Errorf(codes.Internal, "requesting new coordinator vpn IP address: %v", err)
}
vpnIP, err := a.core.GetVPNIP()
if err != nil {
return nil, status.Errorf(codes.Internal, "get own vpn IP address: %v", err)
}
thisPeer, err := a.assemblePeerStruct(vpnIP, role.Coordinator)
if err != nil {
return nil, status.Errorf(codes.Internal, "assembling coordinator peer struct: %v", err)
}
ownerID, clusterID, err := a.core.GetIDs(nil)
if err != nil {
return nil, status.Errorf(codes.Internal, "get owner and cluster ID: %v", err)
}
conn, err := a.dial(ctx, net.JoinHostPort(in.CoordinatorPublicIp, endpointAVPNPort))
if err != nil {
return nil, status.Errorf(codes.Internal, "dialing new coordinator: %v", err)
}
defer conn.Close()
client := pubproto.NewAPIClient(conn)
_, err = client.ActivateAsAdditionalCoordinator(ctx, &pubproto.ActivateAsAdditionalCoordinatorRequest{
AssignedVpnIp: assignedVPNIP,
ActivatingCoordinatorData: peer.ToPubProto([]peer.Peer{thisPeer})[0],
OwnerId: ownerID,
ClusterId: clusterID,
})
err := a.activateCoordinator(ctx, in.CoordinatorPublicIp)
if err != nil {
a.logger.Error("coordinator activation failed", zap.Error(err))
return nil, status.Errorf(codes.Internal, "activate new coordinator: %v", err)
}
return &pubproto.ActivateAdditionalCoordinatorResponse{}, nil
}
func (a *API) activateCoordinators(logToCLI logFunc, coordinatorPublicIPs []string) error {
// Activate all coordinators.
for num, coordinatorPublicIP := range coordinatorPublicIPs {
logToCLI("activating coordinator %3d out of %3d coordinators ...", num+2, len(coordinatorPublicIPs)+1)
if err := a.activateCoordinator(context.TODO(), coordinatorPublicIP); err != nil {
return err
}
}
return nil
}
func (a *API) activateCoordinator(ctx context.Context, coordinatorIP string) error {
ctx, cancel := context.WithTimeout(ctx, deadlineDuration)
defer cancel()
if err := a.core.RequireState(state.ActivatingNodes); err != nil {
return fmt.Errorf("coordinator is not in required state: %v", err)
}
assignedVPNIP, err := a.core.GetNextCoordinatorIP()
if err != nil {
return fmt.Errorf("requesting new coordinator vpn IP address: %v", err)
}
vpnIP, err := a.core.GetVPNIP()
if err != nil {
return fmt.Errorf("get own vpn IP address: %v", err)
}
thisPeer, err := a.assemblePeerStruct(vpnIP, role.Coordinator)
if err != nil {
return fmt.Errorf("assembling coordinator peer struct: %v", err)
}
ownerID, clusterID, err := a.core.GetIDs(nil)
if err != nil {
return fmt.Errorf("get owner and cluster ID: %v", err)
}
_, peers, err := a.core.GetPeers(0)
if err != nil {
return err
}
conn, err := a.dial(ctx, net.JoinHostPort(coordinatorIP, endpointAVPNPort))
if err != nil {
return fmt.Errorf("dialing new coordinator: %v", err)
}
defer conn.Close()
client := pubproto.NewAPIClient(conn)
// This call can be omitted, since this function will be called by the to-be-activated coordinator,
// which knows its own PubKey and can pass it as an argument.
// TODO: Remove this gRPC function once we have a working integration.
resp, err := client.GetPeerVPNPublicKey(ctx, &pubproto.GetPeerVPNPublicKeyRequest{})
if err != nil {
a.logger.Error("failed to get PubKey from new coordinator", zap.Error(err))
return err
}
newCoordinatorPeer := peer.Peer{VPNIP: assignedVPNIP, PublicIP: coordinatorIP, VPNPubKey: resp.CoordinatorPubKey, Role: role.Coordinator}
err = a.core.AddPeer(newCoordinatorPeer)
if err != nil {
a.logger.Error("failed to store new coordinator data", zap.Error(err))
return err
}
for _, p := range peers {
if p.Role == role.Coordinator && p.VPNIP != thisPeer.VPNIP {
if err := a.triggerCoordinatorUpdate(context.TODO(), p.PublicIP); err != nil {
a.logger.Error("triggerCoordinatorUpdate failed", zap.Error(err), zap.String("endpoint", p.PublicIP), zap.String("vpnip", p.VPNIP))
}
}
}
_, err = client.ActivateAsAdditionalCoordinator(ctx, &pubproto.ActivateAsAdditionalCoordinatorRequest{
AssignedVpnIp: assignedVPNIP,
ActivatingCoordinatorData: peer.ToPubProto([]peer.Peer{thisPeer})[0],
Peers: peer.ToPubProto(peers),
OwnerId: ownerID,
ClusterId: clusterID,
})
return err
}
func (a *API) TriggerCoordinatorUpdate(ctx context.Context, in *pubproto.TriggerCoordinatorUpdateRequest) (*pubproto.TriggerCoordinatorUpdateResponse, error) {
if err := a.core.RequireState(state.ActivatingNodes); err != nil {
return nil, status.Errorf(codes.FailedPrecondition, "coordinator is not in required state for updating state: %v", err)
@@ -202,6 +256,15 @@ func (a *API) TriggerCoordinatorUpdate(ctx context.Context, in *pubproto.Trigger
return &pubproto.TriggerCoordinatorUpdateResponse{}, nil
}
// GetPeerVPNPublicKey returns the VPN public key of the peer.
func (a *API) GetPeerVPNPublicKey(ctx context.Context, in *pubproto.GetPeerVPNPublicKeyRequest) (*pubproto.GetPeerVPNPublicKeyResponse, error) {
key, err := a.core.GetVPNPubKey()
if err != nil {
return nil, status.Errorf(codes.Internal, "could not obtain VPNPubKey %v", err)
}
return &pubproto.GetPeerVPNPublicKeyResponse{CoordinatorPubKey: key}, nil
}
func (a *API) triggerCoordinatorUpdate(ctx context.Context, publicIP string) error {
ctx, cancel := context.WithTimeout(ctx, deadlineDuration)
defer cancel()
@@ -219,3 +282,28 @@ func (a *API) triggerCoordinatorUpdate(ctx context.Context, publicIP string) err
return err
}
func (a *API) getk8SCoordinatorJoinArgs(ctx context.Context, coordinatorIP, port string) (*kubeadm.BootstrapTokenDiscovery, string, error) {
conn, err := a.dialInsecure(ctx, net.JoinHostPort(coordinatorIP, port))
if err != nil {
return nil, "", err
}
defer conn.Close()
client := vpnproto.NewAPIClient(conn)
// Since the key has to be generated every time, this gRPC call induces ~1s of overhead.
resp, err := client.GetK8SCertificateKey(ctx, &vpnproto.GetK8SCertificateKeyRequest{})
if err != nil {
return nil, "", err
}
joinArgs, err := client.GetK8SJoinArgs(ctx, &vpnproto.GetK8SJoinArgsRequest{})
if err != nil {
return nil, "", err
}
joinToken := &kubeadm.BootstrapTokenDiscovery{
Token: joinArgs.Token,
APIServerEndpoint: joinArgs.ApiServerEndpoint,
CACertHashes: []string{joinArgs.DiscoveryTokenCaCertHash},
}
return joinToken, resp.CertificateKey, err
}