Mirror of https://github.com/edgelesssys/constellation.git
increase gRPC error message verbosity (#62)

commit 990ca20469
parent d869e10a85
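The commit applies one pattern across the pubapi handlers: instead of forwarding the underlying error verbatim with status.Errorf(code, "%v", err), each return now prefixes the failing operation, so the gRPC status message tells the caller which step broke. Below is a minimal sketch of the before/after, with a hypothetical doStep standing in for a call such as a.core.InitializeStoreIPs(); it is not code from this commit.

package main

import (
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// activateStep is a hypothetical stand-in for one step of a handler like
// ActivateAsCoordinator; doStep represents a call such as a.core.InitializeStoreIPs().
func activateStep(doStep func() error) error {
	if err := doStep(); err != nil {
		// Before: return status.Errorf(codes.Internal, "%v", err)
		// After: the message names the operation that failed.
		return status.Errorf(codes.Internal, "initialize store IPs: %v", err)
	}
	return nil
}

func main() {
	err := activateStep(func() error { return errors.New("store not ready") })
	// Prints: rpc error: code = Internal desc = initialize store IPs: store not ready
	fmt.Println(err)
}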
@@ -22,7 +22,7 @@ func (a *API) ActivateAsCoordinator(in *pubproto.ActivateAsCoordinatorRequest, s
     defer a.mut.Unlock()

     if err := a.core.RequireState(state.AcceptingInit); err != nil {
-        return status.Errorf(codes.FailedPrecondition, "%v", err)
+        return status.Errorf(codes.FailedPrecondition, "node is not in required state: %v", err)
     }

     if len(in.MasterSecret) == 0 {
@@ -54,7 +54,7 @@ func (a *API) ActivateAsCoordinator(in *pubproto.ActivateAsCoordinatorRequest, s
     // This ensures the node is marked as initialzed before the node is in a state that allows code execution
     // Any new additions to ActivateAsNode MUST come after
     if err := a.core.InitializeStoreIPs(); err != nil {
-        return status.Errorf(codes.Internal, "failed to initialize store IPs %v", err)
+        return status.Errorf(codes.Internal, "initialize store IPs: %v", err)
     }

     ownerID, clusterID, err := a.core.GetIDs(in.MasterSecret)
@@ -62,39 +62,39 @@ func (a *API) ActivateAsCoordinator(in *pubproto.ActivateAsCoordinatorRequest, s
         return status.Errorf(codes.Internal, "%v", err)
     }
     if err := a.core.AdvanceState(state.ActivatingNodes, ownerID, clusterID); err != nil {
-        return status.Errorf(codes.Internal, "%v", err)
+        return status.Errorf(codes.Internal, "advance state to ActivatingNodes: %v", err)
     }

     ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
     defer cancel()
     if err := a.core.SetUpKMS(ctx, in.StorageUri, in.KmsUri, in.KeyEncryptionKeyId, in.UseExistingKek); err != nil {
-        return status.Errorf(codes.Internal, "%v", err)
+        return status.Errorf(codes.Internal, "setting up KMS: %v", err)
     }
     vpnIP, err := a.core.GetNextCoordinatorIP()
     if err != nil {
-        return status.Errorf(codes.Internal, "could not obtain coordinator vpn ip%v", err)
+        return status.Errorf(codes.Internal, "get coordinator vpn IP address: %v", err)
     }
     coordPeer, err := a.assemblePeerStruct(vpnIP, role.Coordinator)
     if err != nil {
-        return status.Errorf(codes.Internal, "%v", err)
+        return status.Errorf(codes.Internal, "assembling the coordinator peer struct: %v", err)
     }

     if err := a.core.SetVPNIP(coordPeer.VPNIP); err != nil {
-        return status.Errorf(codes.Internal, "%v", err)
+        return status.Errorf(codes.Internal, "set the vpn IP address: %v", err)
     }
     if err := a.core.AddPeer(coordPeer); err != nil {
-        return status.Errorf(codes.Internal, "%v", err)
+        return status.Errorf(codes.Internal, "adding the coordinator to store/vpn: %v", err)
     }

     logToCLI("Initializing Kubernetes ...")
     kubeconfig, err := a.core.InitCluster(in.AutoscalingNodeGroups, in.CloudServiceAccountUri)
     if err != nil {
-        return status.Errorf(codes.Internal, "%v", err)
+        return status.Errorf(codes.Internal, "initializing kubernetes cluster failed: %v", err)
     }

     // run the VPN-API server
     if err := a.vpnAPIServer.Listen(net.JoinHostPort(coordPeer.VPNIP, vpnAPIPort)); err != nil {
-        return status.Errorf(codes.Internal, "%v", err)
+        return status.Errorf(codes.Internal, "start vpnAPIServer: %v", err)
     }
     a.wgClose.Add(1)
     go func() {
@@ -106,20 +106,20 @@ func (a *API) ActivateAsCoordinator(in *pubproto.ActivateAsCoordinatorRequest, s
     // TODO: check performance and maybe make concurrent
     if err := a.activateNodes(logToCLI, in.NodePublicIps); err != nil {
         a.logger.Error("node activation failed", zap.Error(err))
-        return status.Errorf(codes.Internal, "%v", err)
+        return status.Errorf(codes.Internal, "node initialization: %v", err)
     }

     if err := a.core.SwitchToPersistentStore(); err != nil {
-        return status.Errorf(codes.Internal, "%v", err)
+        return status.Errorf(codes.Internal, "switch to persistent store: %v", err)
     }

     // persist node state on disk
     if err := a.core.PersistNodeState(role.Coordinator, ownerID, clusterID); err != nil {
-        return status.Errorf(codes.Internal, "%v", err)
+        return status.Errorf(codes.Internal, "persist node state: %v", err)
     }
     adminVPNIP, err := a.core.GetNextNodeIP()
     if err != nil {
-        return status.Errorf(codes.Internal, "%v", err)
+        return status.Errorf(codes.Internal, "requesting node IP address: %v", err)
     }
     // This effectively gives code execution, so we do this last.
     err = a.core.AddPeer(peer.Peer{
@@ -128,7 +128,7 @@ func (a *API) ActivateAsCoordinator(in *pubproto.ActivateAsCoordinatorRequest, s
         Role: role.Admin,
     })
     if err != nil {
-        return status.Errorf(codes.Internal, "%v", err)
+        return status.Errorf(codes.Internal, "add peer to store/vpn: %v", err)
     }

     return srv.Send(&pubproto.ActivateAsCoordinatorResponse{
@@ -161,7 +161,7 @@ func (a *API) ActivateAdditionalNodes(in *pubproto.ActivateAdditionalNodesReques
     // TODO: check performance and maybe make concurrent
     if err := a.activateNodes(logToCLI, in.NodePublicIps); err != nil {
         a.logger.Error("node activation failed", zap.Error(err))
-        return status.Errorf(codes.Internal, "%v", err)
+        return status.Errorf(codes.Internal, "activating nodes: %v", err)
     }

     return srv.Send(&pubproto.ActivateAdditionalNodesResponse{
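Note that the status codes themselves are unchanged throughout: the RequireState guards keep codes.FailedPrecondition (the RPC was invoked while the node is in the wrong state), while failures inside the activation steps keep codes.Internal; only the message text gains context. A hedged sketch of that split follows, with requireState standing in for a.core.RequireState and a made-up error value.

package main

import (
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// requireStateGuard is a hypothetical stand-in for the a.core.RequireState check
// at the top of each handler: a wrong node state maps to FailedPrecondition,
// while failures during the activation steps stay Internal.
func requireStateGuard(requireState func() error) error {
	if err := requireState(); err != nil {
		return status.Errorf(codes.FailedPrecondition, "node is not in required state: %v", err)
	}
	return nil
}

func main() {
	err := requireStateGuard(func() error { return errors.New("node is in wrong state") })
	fmt.Println(status.Code(err)) // FailedPrecondition
	fmt.Println(err)              // rpc error: code = FailedPrecondition desc = node is not in required state: node is in wrong state
}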
@@ -21,7 +21,7 @@ func (a *API) ActivateAsAdditionalCoordinator(ctx context.Context, in *pubproto.
     defer a.mut.Unlock()

     if err := a.core.RequireState(state.AcceptingInit); err != nil {
-        return nil, status.Errorf(codes.FailedPrecondition, "%v", err)
+        return nil, status.Errorf(codes.FailedPrecondition, "node is not in required state: %v", err)
     }
     // Some of the following actions can't be reverted (yet). If there's an
     // error, we may be in a weird state. Thus, mark this peer as failed.
@@ -35,23 +35,23 @@ func (a *API) ActivateAsAdditionalCoordinator(ctx context.Context, in *pubproto.
     // This ensures the node is marked as initialzed before the node is in a state that allows code execution
     // Any new additions to ActivateAsAdditionalCoordinator MUST come after
     if err := a.core.AdvanceState(state.ActivatingNodes, in.OwnerId, in.ClusterId); err != nil {
-        return nil, status.Errorf(codes.Internal, "%v", err)
+        return nil, status.Errorf(codes.Internal, "advance state to ActivatingNodes: %v", err)
     }

     // TODO: add KMS functions

     // add one coordinator to the VPN
     if err := a.core.SetVPNIP(in.AssignedVpnIp); err != nil {
-        return nil, status.Errorf(codes.Internal, "%v", err)
+        return nil, status.Errorf(codes.Internal, "set vpn IP address: %v", err)
     }

     if err := a.core.AddPeerToVPN(peer.FromPubProto([]*pubproto.Peer{in.ActivatingCoordinatorData})[0]); err != nil {
-        return nil, status.Errorf(codes.Internal, "%v", err)
+        return nil, status.Errorf(codes.Internal, "adding initial peers to vpn: %v", err)
     }

     // run the VPN-API server
     if err := a.vpnAPIServer.Listen(net.JoinHostPort(in.AssignedVpnIp, vpnAPIPort)); err != nil {
-        return nil, status.Errorf(codes.Internal, "%v", err)
+        return nil, status.Errorf(codes.Internal, "start vpnAPIServer: %v", err)
     }
     a.wgClose.Add(1)
     go func() {
@@ -65,7 +65,7 @@ func (a *API) ActivateAsAdditionalCoordinator(ctx context.Context, in *pubproto.

     // ATTENTION: STORE HAS TO BE EMPTY (NO OVERLAPPING KEYS) WHEN THIS FUNCTION IS CALLED
     if err := a.core.SwitchToPersistentStore(); err != nil {
-        return nil, status.Errorf(codes.Internal, "%v", err)
+        return nil, status.Errorf(codes.Internal, "switch to persistent store: %v", err)
     }
     a.logger.Info("Transition to persistent store successful")

@@ -74,28 +74,28 @@ func (a *API) ActivateAsAdditionalCoordinator(ctx context.Context, in *pubproto.

     thisPeer, err := a.assemblePeerStruct(in.AssignedVpnIp, role.Coordinator)
     if err != nil {
-        return nil, status.Errorf(codes.Internal, "%v", err)
+        return nil, status.Errorf(codes.Internal, "assembling coordinator peer struct: %v", err)
     }
     if err := a.core.AddPeerToStore(thisPeer); err != nil {
-        return nil, status.Errorf(codes.Internal, "%v", err)
+        return nil, status.Errorf(codes.Internal, "adding new coordinator to persistent store: %v", err)
     }

     resourceVersion, peers, err := a.core.GetPeers(0)
     if err != nil {
-        return nil, status.Errorf(codes.Internal, "%v", err)
+        return nil, status.Errorf(codes.Internal, "get peers from store: %v", err)
     }
     a.resourceVersion = resourceVersion

     err = a.core.UpdatePeers(peers)
     if err != nil {
-        return nil, status.Errorf(codes.Internal, "%v", err)
+        return nil, status.Errorf(codes.Internal, "synchronizing peers with vpn state: %v", err)
     }
     // Manually trigger an update operation on all peers.
     // This may be expendable in the future, depending on whether it's acceptable that it takes
     // some seconds until the nodes get all peer data via their regular update requests.
     _, peers, err = a.core.GetPeers(0)
     if err != nil {
-        return nil, status.Errorf(codes.Internal, "%v", err)
+        return nil, status.Errorf(codes.Internal, "get peers from store: %v", err)
     }
     a.logger.Info("", zap.Any("peers", peers))
     for _, p := range peers {
@@ -120,28 +120,28 @@ func (a *API) ActivateAdditionalCoordinator(ctx context.Context, in *pubproto.Ac
     defer cancel()

     if err := a.core.RequireState(state.ActivatingNodes); err != nil {
-        return nil, status.Errorf(codes.FailedPrecondition, "%v", err)
+        return nil, status.Errorf(codes.FailedPrecondition, "coordinator is not in required state: %v", err)
     }
     assignedVPNIP, err := a.core.GetNextCoordinatorIP()
     if err != nil {
-        return nil, err
+        return nil, status.Errorf(codes.Internal, "requesting new coordinator vpn IP address: %v", err)
     }
     vpnIP, err := a.core.GetVPNIP()
     if err != nil {
-        return nil, err
+        return nil, status.Errorf(codes.Internal, "get own vpn IP address: %v", err)
     }
     thisPeer, err := a.assemblePeerStruct(vpnIP, role.Coordinator)
     if err != nil {
-        return nil, err
+        return nil, status.Errorf(codes.Internal, "assembling coordinator peer struct: %v", err)
     }
     ownerID, clusterID, err := a.core.GetIDs(nil)
     if err != nil {
-        return nil, err
+        return nil, status.Errorf(codes.Internal, "get owner and cluster ID: %v", err)
     }

     conn, err := a.dial(ctx, net.JoinHostPort(in.CoordinatorPublicIp, endpointAVPNPort))
     if err != nil {
-        return nil, err
+        return nil, status.Errorf(codes.Internal, "dialing new coordinator: %v", err)
     }
     defer conn.Close()

@@ -155,7 +155,7 @@ func (a *API) ActivateAdditionalCoordinator(ctx context.Context, in *pubproto.Ac
     })
     if err != nil {
         a.logger.Error("coordinator activation failed", zap.Error(err))
-        return nil, err
+        return nil, status.Errorf(codes.Internal, "activate new coordinator: %v", err)
     }

     return &pubproto.ActivateAdditionalCoordinatorResponse{}, nil
@@ -163,19 +163,19 @@ func (a *API) ActivateAdditionalCoordinator(ctx context.Context, in *pubproto.Ac

 func (a *API) TriggerCoordinatorUpdate(ctx context.Context, in *pubproto.TriggerCoordinatorUpdateRequest) (*pubproto.TriggerCoordinatorUpdateResponse, error) {
     if err := a.core.RequireState(state.ActivatingNodes); err != nil {
-        return nil, status.Errorf(codes.FailedPrecondition, "%v", err)
+        return nil, status.Errorf(codes.FailedPrecondition, "coordinator is not in required state for updating state: %v", err)
     }
     resourceVersion, peers, err := a.core.GetPeers(a.resourceVersion)
     if err != nil {
-        return nil, status.Errorf(codes.Internal, "%v", err)
+        return nil, status.Errorf(codes.Internal, "get peers from store: %v", err)
     }
     if resourceVersion == a.resourceVersion {
-        a.logger.Info("coordinator: ressource version identical, no need to update")
+        a.logger.Info("ressource version identical, no need to update")
         return &pubproto.TriggerCoordinatorUpdateResponse{}, nil
     }
     err = a.core.UpdatePeers(peers)
     if err != nil {
-        return nil, status.Errorf(codes.Internal, "%v", err)
+        return nil, status.Errorf(codes.Internal, "synchronizing peers with vpn state: %v", err)
     }
     a.resourceVersion = resourceVersion
     return &pubproto.TriggerCoordinatorUpdateResponse{}, nil
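In ActivateAdditionalCoordinator the change goes a step further: several paths previously returned the error without any status at all (return nil, err), which grpc-go reports to clients as codes.Unknown. Wrapping them in status.Errorf both pins an explicit codes.Internal and adds the operation prefix. A small illustration with a made-up dial error:

package main

import (
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// Made-up error, as a failed a.dial(...) might produce.
	plain := errors.New("connection refused")

	// A plain error returned from a handler carries no status;
	// grpc-go delivers it to the client as codes.Unknown.
	fmt.Println(status.Code(plain)) // Unknown

	// Wrapping with status.Errorf, as this commit does, sets an explicit
	// code and names the failing step.
	wrapped := status.Errorf(codes.Internal, "dialing new coordinator: %v", plain)
	fmt.Println(status.Code(wrapped)) // Internal
}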
@@ -22,7 +22,7 @@ func (a *API) ActivateAsNode(ctx context.Context, in *pubproto.ActivateAsNodeReq
     defer a.mut.Unlock()

     if err := a.core.RequireState(state.AcceptingInit); err != nil {
-        return nil, status.Errorf(codes.FailedPrecondition, "%v", err)
+        return nil, status.Errorf(codes.FailedPrecondition, "node is not in required state for activation: %v", err)
     }

     if len(in.OwnerId) == 0 || len(in.ClusterId) == 0 {
@@ -42,26 +42,26 @@ func (a *API) ActivateAsNode(ctx context.Context, in *pubproto.ActivateAsNodeReq
     // This ensures the node is marked as initialzed before the node is in a state that allows code execution
     // Any new additions to ActivateAsNode MUST come after
     if err := a.core.AdvanceState(state.NodeWaitingForClusterJoin, in.OwnerId, in.ClusterId); err != nil {
-        return nil, status.Errorf(codes.Internal, "%v", err)
+        return nil, status.Errorf(codes.Internal, "advance node state: %v", err)
     }

     vpnPubKey, err := a.core.GetVPNPubKey()
     if err != nil {
-        return nil, status.Errorf(codes.Internal, "%v", err)
+        return nil, status.Errorf(codes.Internal, "get vpn publicKey: %v", err)
     }

     if err := a.core.SetVPNIP(in.NodeVpnIp); err != nil {
-        return nil, status.Errorf(codes.Internal, "%v", err)
+        return nil, status.Errorf(codes.Internal, "setting node vpn IP address: %v", err)
     }

     // add initial peers
     if err := a.core.UpdatePeers(peer.FromPubProto(in.Peers)); err != nil {
-        return nil, status.Errorf(codes.Internal, "%v", err)
+        return nil, status.Errorf(codes.Internal, "synchronizing peers with vpn state: %v", err)
     }

     // persist node state on disk
     if err := a.core.PersistNodeState(role.Node, in.OwnerId, in.ClusterId); err != nil {
-        return nil, status.Errorf(codes.Internal, "%v", err)
+        return nil, status.Errorf(codes.Internal, "persist node state: %v", err)
     }

     // regularly get (peer) updates from Coordinator
@@ -77,17 +77,17 @@ func (a *API) JoinCluster(ctx context.Context, in *pubproto.JoinClusterRequest)
     defer a.mut.Unlock()

     if err := a.core.RequireState(state.NodeWaitingForClusterJoin); err != nil {
-        return nil, status.Errorf(codes.FailedPrecondition, "%v", err)
+        return nil, status.Errorf(codes.FailedPrecondition, "node is not in required state for cluster join: %v", err)
     }

     conn, err := a.dialInsecure(ctx, net.JoinHostPort(in.CoordinatorVpnIp, vpnAPIPort))
     if err != nil {
-        return nil, status.Errorf(codes.Unavailable, "%v", err)
+        return nil, status.Errorf(codes.Unavailable, "dial coordinator: %v", err)
     }
     resp, err := vpnproto.NewAPIClient(conn).GetK8SJoinArgs(ctx, &vpnproto.GetK8SJoinArgsRequest{})
     conn.Close()
     if err != nil {
-        return nil, status.Errorf(codes.Internal, "%v", err)
+        return nil, status.Errorf(codes.Internal, "request K8s join string: %v", err)
     }

     err = a.core.JoinCluster(kubeadm.BootstrapTokenDiscovery{
@@ -97,11 +97,11 @@ func (a *API) JoinCluster(ctx context.Context, in *pubproto.JoinClusterRequest)
     })
     if err != nil {
         _ = a.core.AdvanceState(state.Failed, nil, nil)
-        return nil, status.Errorf(codes.Internal, "%v", err)
+        return nil, status.Errorf(codes.Internal, "joining kubernetes cluster: %v", err)
     }

     if err := a.core.AdvanceState(state.IsNode, nil, nil); err != nil {
-        return nil, status.Errorf(codes.Internal, "%v", err)
+        return nil, status.Errorf(codes.Internal, "advance state to IsNode: %v", err)
     }

     return &pubproto.JoinClusterResponse{}, nil
@@ -110,10 +110,10 @@ func (a *API) JoinCluster(ctx context.Context, in *pubproto.JoinClusterRequest)
 // TriggerNodeUpdate is the RPC call to request this node to get an update from the Coordinator.
 func (a *API) TriggerNodeUpdate(ctx context.Context, in *pubproto.TriggerNodeUpdateRequest) (*pubproto.TriggerNodeUpdateResponse, error) {
     if err := a.core.RequireState(state.IsNode); err != nil {
-        return nil, status.Errorf(codes.FailedPrecondition, "%v", err)
+        return nil, status.Errorf(codes.FailedPrecondition, "node is not in required state for receiving update command: %v", err)
     }
     if err := a.update(ctx); err != nil {
-        return nil, status.Errorf(codes.Internal, "%v", err)
+        return nil, status.Errorf(codes.Internal, "node update: %v", err)
     }
     return &pubproto.TriggerNodeUpdateResponse{}, nil
 }
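For a caller such as the CLI, the added context arrives in the status message of the RPC error. A generic sketch of how a gRPC client can read it back (not code from this repository, and the error value is hypothetical):

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// inspect shows how a client can pull the code and the now more verbose
// message out of an error returned by one of these RPCs.
func inspect(err error) {
	st := status.Convert(err)
	fmt.Println("code:   ", st.Code())
	fmt.Println("message:", st.Message())
}

func main() {
	// Hypothetical error as a client of ActivateAsCoordinator might now see it.
	err := status.Errorf(codes.Internal, "setting up KMS: connection timed out")
	inspect(err)
	// code:    Internal
	// message: setting up KMS: connection timed out
}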