/*
Copyright (c) Edgeless Systems GmbH

SPDX-License-Identifier: AGPL-3.0-only
*/

/*
# InitServer

The InitServer is one of the two main components of the bootstrapper.
It is responsible for the initial setup of a node and for the initialization of the Kubernetes cluster.

The InitServer is started on each node and waits either for a call from the CLI
or for the JoinClient to connect to an existing cluster.

If a call from the CLI is received, the InitServer bootstraps the Kubernetes cluster and stops the JoinClient.
*/
package initserver

import (
	"bufio"
	"context"
	"errors"
	"fmt"
	"io"
	"log/slog"
	"net"
	"strings"
	"sync"
	"time"

	"github.com/edgelesssys/constellation/v2/bootstrapper/initproto"
	"github.com/edgelesssys/constellation/v2/bootstrapper/internal/diskencryption"
	"github.com/edgelesssys/constellation/v2/bootstrapper/internal/journald"
	"github.com/edgelesssys/constellation/v2/internal/atls"
	"github.com/edgelesssys/constellation/v2/internal/attestation"
	"github.com/edgelesssys/constellation/v2/internal/crypto"
	"github.com/edgelesssys/constellation/v2/internal/file"
	"github.com/edgelesssys/constellation/v2/internal/grpc/atlscredentials"
	"github.com/edgelesssys/constellation/v2/internal/grpc/grpclog"
	"github.com/edgelesssys/constellation/v2/internal/kms/kms"
	kmssetup "github.com/edgelesssys/constellation/v2/internal/kms/setup"
	"github.com/edgelesssys/constellation/v2/internal/logger"
	"github.com/edgelesssys/constellation/v2/internal/nodestate"
	"github.com/edgelesssys/constellation/v2/internal/role"
	"github.com/edgelesssys/constellation/v2/internal/versions/components"
	"golang.org/x/crypto/bcrypt"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/keepalive"
	"google.golang.org/grpc/status"
)

// Server is the initialization server, which is started on each node.
// The server handles initialization calls from the CLI and initializes the
// Kubernetes cluster.
type Server struct {
	nodeLock     locker
	initializer  ClusterInitializer
	disk         encryptedDisk
	fileHandler  file.Handler
	grpcServer   serveStopper
	cleaner      cleaner
	issuer       atls.Issuer
	shutdownLock sync.RWMutex

	initSecretHash []byte

	kmsURI string

	log *slog.Logger

	journaldCollector journaldCollection

	initproto.UnimplementedAPIServer
}

// New creates a new initialization server.
func New(ctx context.Context, lock locker, kube ClusterInitializer, issuer atls.Issuer, fh file.Handler, metadata MetadataAPI, log *slog.Logger) (*Server, error) {
	log = log.WithGroup("initServer")

	initSecretHash, err := metadata.InitSecretHash(ctx)
	if err != nil {
		return nil, fmt.Errorf("retrieving init secret hash: %w", err)
	}
	if len(initSecretHash) == 0 {
		return nil, fmt.Errorf("init secret hash is empty")
	}

	jctlCollector, err := journald.NewCollector(ctx)
	if err != nil {
		return nil, err
	}

	server := &Server{
		nodeLock:          lock,
		disk:              diskencryption.New(),
		initializer:       kube,
		fileHandler:       fh,
		issuer:            issuer,
		log:               log,
		initSecretHash:    initSecretHash,
		journaldCollector: jctlCollector,
	}

	grpcServer := grpc.NewServer(
		grpc.Creds(atlscredentials.New(issuer, nil)),
		grpc.KeepaliveParams(keepalive.ServerParameters{Time: 15 * time.Second}),
		logger.GetServerUnaryInterceptor(log.WithGroup("gRPC")),
	)
	initproto.RegisterAPIServer(grpcServer, server)

	server.grpcServer = grpcServer
	return server, nil
}

// Serve starts the initialization server.
func (s *Server) Serve(ip, port string, cleaner cleaner) error {
	s.cleaner = cleaner
	lis, err := net.Listen("tcp", net.JoinHostPort(ip, port))
	if err != nil {
		return fmt.Errorf("failed to listen: %w", err)
	}

	s.log.Info("Starting")
	return s.grpcServer.Serve(lis)
}
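
// A rough sketch of how the bootstrapper might wire up this server. The
// identifiers nodeLock, kubeInitializer, metadataAPI, joinClientCleaner,
// bindIP, and bindPort are illustrative placeholders, not names defined in
// this package:
//
//	srv, err := initserver.New(ctx, nodeLock, kubeInitializer, issuer, fileHandler, metadataAPI, logger)
//	if err != nil {
//		return err
//	}
//	// Serve blocks until Stop is called, e.g. after the JoinClient has joined an existing cluster.
//	if err := srv.Serve(bindIP, bindPort, joinClientCleaner); err != nil {
//		return err
//	}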

// Init initializes the cluster.
func (s *Server) Init(req *initproto.InitRequest, stream initproto.API_InitServer) (err error) {
	// Acquire lock to prevent shutdown while Init is still running
	s.shutdownLock.RLock()
	defer s.shutdownLock.RUnlock()

	log := s.log.With(slog.String("peer", grpclog.PeerAddrFromContext(stream.Context())))
	log.Info("Init called")

	s.kmsURI = req.KmsUri

	if err := bcrypt.CompareHashAndPassword(s.initSecretHash, req.InitSecret); err != nil {
		if e := s.sendLogsWithMessage(stream, status.Errorf(codes.Internal, "invalid init secret %s", err)); e != nil {
			err = errors.Join(err, e)
		}
		return err
	}

	cloudKms, err := kmssetup.KMS(stream.Context(), req.StorageUri, req.KmsUri)
	if err != nil {
		if e := s.sendLogsWithMessage(stream, status.Errorf(codes.Internal, "creating kms client: %s", err)); e != nil {
			err = errors.Join(err, e)
		}
		return err
	}

	// generate values for cluster attestation
	clusterID, err := deriveMeasurementValues(stream.Context(), req.MeasurementSalt, cloudKms)
	if err != nil {
		if e := s.sendLogsWithMessage(stream, status.Errorf(codes.Internal, "deriving measurement values: %s", err)); e != nil {
			err = errors.Join(err, e)
		}
		return err
	}

	nodeLockAcquired, err := s.nodeLock.TryLockOnce(clusterID)
	if err != nil {
		if e := s.sendLogsWithMessage(stream, status.Errorf(codes.Internal, "locking node: %s", err)); e != nil {
			err = errors.Join(err, e)
		}
		return err
	}
	if !nodeLockAcquired {
		// The join client seems to already have a connection to an
		// existing join service. At this point, any further call to
		// init does not make sense, so we just stop.
		//
		// The server stops itself after the current call is done.
		log.Warn("Node is already in a join process")

		err = status.Error(codes.FailedPrecondition, "node is already being activated")

		if e := s.sendLogsWithMessage(stream, err); e != nil {
			err = errors.Join(err, e)
		}
		return err
	}

	// Stop the join client -> We no longer expect to join an existing cluster,
	// since we are bootstrapping a new one.
	// Any errors following this call will result in a failed node that may not join any cluster.
	s.cleaner.Clean()

	if err := s.setupDisk(stream.Context(), cloudKms); err != nil {
		if e := s.sendLogsWithMessage(stream, status.Errorf(codes.Internal, "setting up disk: %s", err)); e != nil {
			err = errors.Join(err, e)
		}
		return err
	}

	state := nodestate.NodeState{
		Role:            role.ControlPlane,
		MeasurementSalt: req.MeasurementSalt,
	}
	if err := state.ToFile(s.fileHandler); err != nil {
		if e := s.sendLogsWithMessage(stream, status.Errorf(codes.Internal, "persisting node state: %s", err)); e != nil {
			err = errors.Join(err, e)
		}
		return err
	}

	clusterName := req.ClusterName
	if clusterName == "" {
		clusterName = "constellation"
	}

	kubeconfig, err := s.initializer.InitCluster(stream.Context(),
		req.KubernetesVersion,
		clusterName,
		req.ConformanceMode,
		req.KubernetesComponents,
		req.ApiserverCertSans,
		req.ServiceCidr,
		s.log,
	)
	if err != nil {
		if e := s.sendLogsWithMessage(stream, status.Errorf(codes.Internal, "initializing cluster: %s", err)); e != nil {
			err = errors.Join(err, e)
		}
		return err
	}

	log.Info("Init succeeded")

	successMessage := &initproto.InitResponse_InitSuccess{
		InitSuccess: &initproto.InitSuccessResponse{
			Kubeconfig: kubeconfig,
			ClusterId:  clusterID,
		},
	}

	return stream.Send(&initproto.InitResponse{Kind: successMessage})
}
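
// sendLogsWithMessage first reports the given error to the CLI as an InitFailure
// response and then streams the node's journald logs in chunks so that the
// failure can be debugged remotely.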
func (s *Server) sendLogsWithMessage(stream initproto.API_InitServer, message error) error {
	// send back the error message
	if err := stream.Send(&initproto.InitResponse{
		Kind: &initproto.InitResponse_InitFailure{
			InitFailure: &initproto.InitFailureResponse{Error: message.Error()},
		},
	}); err != nil {
		return err
	}

	logPipe, err := s.journaldCollector.Start()
	if err != nil {
		return status.Errorf(codes.Internal, "failed starting the log collector: %s", err)
	}

	reader := bufio.NewReader(logPipe)
	buffer := make([]byte, 1024)

	for {
		n, err := io.ReadFull(reader, buffer)
		buffer = buffer[:n] // cap the buffer so that we don't have a bunch of nullbytes at the end
		if err != nil {
			if err == io.EOF {
				break
			}
			if err != io.ErrUnexpectedEOF {
				return status.Errorf(codes.Internal, "failed to read from pipe: %s", err)
			}
		}

		err = stream.Send(&initproto.InitResponse{
			Kind: &initproto.InitResponse_Log{
				Log: &initproto.LogResponseType{
					Log: buffer,
				},
			},
		})
		if err != nil {
			return status.Errorf(codes.Internal, "failed to send chunk: %s", err)
		}
	}

	return nil
}

// Stop stops the initialization server gracefully.
func (s *Server) Stop() {
	s.log.Info("Stopping")

	// Make sure to only stop the server if no Init calls are running
	s.shutdownLock.Lock()
	defer s.shutdownLock.Unlock()
	s.grpcServer.GracefulStop()

	s.log.Info("Stopped")
}
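
// setupDisk replaces the temporary passphrase of the node's encrypted state disk
// with a permanent key retrieved from the cloud KMS, using the disk's UUID as the
// key identifier.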
func (s *Server) setupDisk(ctx context.Context, cloudKms kms.CloudKMS) error {
	free, err := s.disk.Open()
	if err != nil {
		return fmt.Errorf("opening encrypted disk: %w", err)
	}
	defer free()

	uuid, err := s.disk.UUID()
	if err != nil {
		return fmt.Errorf("retrieving uuid of disk: %w", err)
	}
	uuid = strings.ToLower(uuid)

	diskKey, err := cloudKms.GetDEK(ctx, crypto.DEKPrefix+uuid, crypto.StateDiskKeyLength)
	if err != nil {
		return err
	}

	return s.disk.UpdatePassphrase(string(diskKey))
}
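
// deriveMeasurementValues derives the cluster ID from the measurement secret
// (fetched from the cloud KMS) and the given measurement salt.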
func deriveMeasurementValues(ctx context.Context, measurementSalt []byte, cloudKms kms.CloudKMS) (clusterID []byte, err error) {
	secret, err := cloudKms.GetDEK(ctx, crypto.DEKPrefix+crypto.MeasurementSecretKeyID, crypto.DerivedKeyLengthDefault)
	if err != nil {
		return nil, err
	}
	clusterID, err = attestation.DeriveClusterID(secret, measurementSalt)
	if err != nil {
		return nil, err
	}

	return clusterID, nil
}

// ClusterInitializer has the ability to initialize a cluster.
type ClusterInitializer interface {
	// InitCluster initializes a new Kubernetes cluster.
	InitCluster(
		ctx context.Context,
		k8sVersion string,
		clusterName string,
		conformanceMode bool,
		kubernetesComponents components.Components,
		apiServerCertSANs []string,
		serviceCIDR string,
		log *slog.Logger,
	) ([]byte, error)
}
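
// encryptedDisk manages the encrypted state disk of the node.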
type encryptedDisk interface {
	// Open prepares the underlying device for disk operations.
	Open() (free func(), err error)
	// UUID gets the device's UUID.
	UUID() (string, error)
	// UpdatePassphrase switches the initial random passphrase of the encrypted disk to a permanent passphrase.
	UpdatePassphrase(passphrase string) error
}
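
// serveStopper is a server that can be started and gracefully stopped.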
type serveStopper interface {
	// Serve starts the server.
	Serve(lis net.Listener) error
	// GracefulStop stops the server and blocks until all requests are done.
	GracefulStop()
}
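
// locker locks the node exactly once, so that it is either initialized or joined,
// but never both.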
type locker interface {
	// TryLockOnce tries to lock the node. If the node is already locked, it
	// returns false. If the node is unlocked, it locks it and returns true.
	TryLockOnce(clusterID []byte) (bool, error)
}
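
// cleaner stops components that are no longer needed once the cluster is being
// bootstrapped, e.g. the JoinClient.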
type cleaner interface {
	Clean()
}

// MetadataAPI provides information about the instances.
type MetadataAPI interface {
	// InitSecretHash returns the initSecretHash of the instance.
	InitSecretHash(ctx context.Context) ([]byte, error)
}

// journaldCollection is an interface for collecting journald logs.
type journaldCollection interface {
	// Start starts the journald collector and returns a pipe from which the system logs can be read.
	Start() (io.ReadCloser, error)
}