//go:build integration

package integration

import (
	"context"
	"crypto/tls"
	"errors"
	"fmt"
	"io"
	"net"
	"os"
	"path/filepath"
	"strings"
	"testing"
	"time"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/strslice"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/archive"
	"github.com/docker/go-connections/nat"
	"github.com/edgelesssys/constellation/coordinator/atls"
	"github.com/edgelesssys/constellation/coordinator/core"
	"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
	"github.com/edgelesssys/constellation/coordinator/role"
	"github.com/edgelesssys/constellation/coordinator/store"
	"github.com/edgelesssys/constellation/coordinator/storewrapper"
	kms "github.com/edgelesssys/constellation/kms/server/setup"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"go.uber.org/goleak"
	"go.uber.org/zap"
	"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

/*
Notes regarding the integration test implementation:

Scale:
'numberPeers' should be < 30; otherwise activation might get stuck, because the Docker
network doesn't scale well (possibly more than ~50 WireGuard kernel interfaces are the reason).
With over 150 nodes, node activation fails due to Docker-internal network naming issues.
This could be extended further, but the currently possible number of nodes is enough for this test.

Usage of the Docker library:
The API calls are sometimes slower than the equivalent 'docker ...' shell commands; this is
especially true for termination. However, to keep the code clean, we accept this tradeoff
and use the library functions.
*/

const (
	publicgRPCPort         = "9000"
	constellationImageName = "constellation:latest"
	etcdImageName          = "bitnami/etcd:3.5.2"
	etcdOverlayNetwork     = "constellationIntegrationTest"
	masterSecret           = "ConstellationIntegrationTest"
	localLogDirectory      = "/tmp/coordinator/logs"

	numberFirstActivation     = 3
	numberSecondaryActivation = 3
	numberThirdActivation     = 3
)

var (
	hostconfigConstellationPeer = &container.HostConfig{
		Binds:      []string{"/dev/net/tun:/dev/net/tun"}, // necessary for wireguard interface creation
		CapAdd:     strslice.StrSlice{"NET_ADMIN"},        // necessary for wireguard interface creation
		AutoRemove: true,
	}
	configConstellationPeer = &container.Config{
		Image:        constellationImageName,
		AttachStdout: true, // necessary to attach to the container log
		AttachStderr: true, // necessary to attach to the container log
		Tty:          true, // necessary to attach to the container log
	}

	hostconfigEtcd = &container.HostConfig{
		AutoRemove: true,
	}
	configEtcd = &container.Config{
		Image: etcdImageName,
		Env: []string{
			"ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379",
			"ETCD_ADVERTISE_CLIENT_URLS=http://127.0.0.1:2379",
			"ETCD_LOG_LEVEL=debug",
			"ETCD_DATA_DIR=/bitnami/etcd/data",
		},
		Entrypoint:   []string{"/opt/bitnami/etcd/bin/etcd"},
		AttachStdout: true,
		AttachStderr: true,
		Tty:          true,
	}

	constellationDockerImageBuildOptions = types.ImageBuildOptions{
		Dockerfile:     "test/Dockerfile",
		Tags:           []string{constellationImageName},
		Remove:         true,
		ForceRemove:    true,
		SuppressOutput: false,
		PullParent:     true,
	}

	containerLogConfig = types.ContainerLogsOptions{
		ShowStdout: true,
		Follow:     true,
	}

	wgExecConfig = types.ExecConfig{
		Cmd:          []string{"wg"},
		AttachStdout: true,
		AttachStderr: true,
	}
	pingExecConfig = types.ExecConfig{
		AttachStdout: true,
		AttachStderr: true,
	}

	activeCoordinators []string

	coordinatorCounter int
	nodeCounter        int
)
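
// peerInfo bundles what the test tracks about a running peer container: the Docker
// create response, whether the peer acts as a coordinator, and its VPN IP.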
type peerInfo struct {
	dockerData    container.ContainerCreateCreatedBody
	isCoordinator bool
	vpnIP         string
}
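
// TestMain drives the integration test: it builds the Constellation image, starts an
// etcd container and two coordinator containers, then activates nodes in three phases
// (adding a third coordinator along the way), verifying the WireGuard peer count and
// VPN connectivity after each phase.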
func TestMain(t *testing.T) {
	assert := assert.New(t)
	require := require.New(t)

	activePeers := make(map[string]peerInfo)

	defer goleak.VerifyNone(t,
		// https://github.com/census-instrumentation/opencensus-go/issues/1262
		goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"),
	)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	cwd, err := os.Getwd()
	require.NoError(err)
	require.NoError(os.Chdir(filepath.Join(cwd, "..")))
	require.NoError(createTempDir())

	// setup Docker containers
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	require.NoError(err)
	defer cli.Close()

	versionInfo, err := cli.Info(ctx)
	require.NoError(err)
	t.Logf("start integration test, local docker version %v", versionInfo.ServerVersion)

	require.NoError(imageBuild(ctx, cli))
	defer cli.ImageRemove(ctx, constellationImageName, types.ImageRemoveOptions{Force: true, PruneChildren: true})

	reader, err := cli.ImagePull(ctx, etcdImageName, types.ImagePullOptions{})
	require.NoError(err)
	_, err = io.Copy(os.Stdout, reader)
	require.NoError(err)
	require.NoError(reader.Close())

	// Add another docker network to be able to resolve etcd-storage from the coordinator.
	// This is not possible in the default "bridge" network.
	dockerNetwork, err := cli.NetworkCreate(ctx, etcdOverlayNetwork, types.NetworkCreate{Driver: "bridge", Internal: true})
	require.NoError(err)
	defer cli.NetworkRemove(ctx, etcdOverlayNetwork)

	// setup etcd
	t.Log("create etcd container...")
	respEtcd, err := cli.ContainerCreate(ctx, configEtcd, hostconfigEtcd, nil, nil, "etcd-storage")
	require.NoError(err)
	require.NoError(cli.ContainerStart(ctx, respEtcd.ID, types.ContainerStartOptions{}))
	defer killDockerContainer(ctx, cli, respEtcd)
	require.NoError(cli.NetworkConnect(ctx, dockerNetwork.ID, respEtcd.ID, nil))
	etcdData, err := cli.ContainerInspect(ctx, respEtcd.ID)
	require.NoError(err)
	etcdIPAddr := etcdData.NetworkSettings.DefaultNetworkSettings.IPAddress
	etcdstore, err := store.NewEtcdStore(net.JoinHostPort(etcdIPAddr, "2379"), false, zap.NewNop())
	require.NoError(err)
	defer etcdstore.Close()

	defer killDockerContainers(ctx, cli, activePeers)

	// setup coordinator containers
	t.Log("create 1st coordinator container...")
	require.NoError(createCoordinatorContainer(ctx, cli, "master-1", dockerNetwork.ID, activePeers))
	t.Log("create 2nd coordinator container...")
	require.NoError(createCoordinatorContainer(ctx, cli, "master-2", dockerNetwork.ID, activePeers))

	// 1st activation phase
	ips, err := spawnContainers(ctx, cli, numberFirstActivation, activePeers)
	require.NoError(err)

	t.Logf("node ips: %v", ips)
	t.Log("activate coordinator...")
	start := time.Now()
	assert.NoError(startCoordinator(ctx, activeCoordinators[0], ips))
	elapsed := time.Since(start)
	t.Logf("activation took %v", elapsed)
	// activate additional coordinator
	require.NoError(addNewCoordinatorToCoordinator(ctx, activeCoordinators[1], activeCoordinators[0]))
	require.NoError(updateVPNIPs(activePeers, etcdstore))

	t.Log("count peers in instances")
	countPeersTest(ctx, t, cli, wgExecConfig, activePeers)
	t.Log("start ping test")
	pingTest(ctx, t, cli, pingExecConfig, activePeers, etcdstore)

	// 2nd activation phase
	ips, err = spawnContainers(ctx, cli, numberSecondaryActivation, activePeers)
	require.NoError(err)
	t.Logf("node ips: %v", ips)
	t.Log("add additional nodes")
	start = time.Now()
	assert.NoError(addNewNodesToCoordinator(ctx, activeCoordinators[1], ips))
	elapsed = time.Since(start)
	t.Logf("adding took %v", elapsed)
	require.NoError(updateVPNIPs(activePeers, etcdstore))

	t.Log("count peers in instances")
	countPeersTest(ctx, t, cli, wgExecConfig, activePeers)
	t.Log("start ping test")
	pingTest(ctx, t, cli, pingExecConfig, activePeers, etcdstore)

	// ----------------------------------------------------------------
	t.Log("create 3rd coordinator container...")
	require.NoError(createCoordinatorContainer(ctx, cli, "master-3", dockerNetwork.ID, activePeers))
	// activate additional coordinator
	require.NoError(addNewCoordinatorToCoordinator(ctx, activeCoordinators[2], activeCoordinators[1]))
	require.NoError(updateVPNIPs(activePeers, etcdstore))

	// 3rd activation phase
	ips, err = spawnContainers(ctx, cli, numberThirdActivation, activePeers)
	require.NoError(err)
	t.Logf("node ips: %v", ips)
	t.Log("add additional nodes")
	start = time.Now()
	assert.NoError(addNewNodesToCoordinator(ctx, activeCoordinators[2], ips))
	elapsed = time.Since(start)
	t.Logf("adding took %v", elapsed)
	require.NoError(updateVPNIPs(activePeers, etcdstore))

	t.Log("count peers in instances")
	countPeersTest(ctx, t, cli, wgExecConfig, activePeers)
	t.Log("start ping test")
	pingTest(ctx, t, cli, pingExecConfig, activePeers, etcdstore)
}

// helper methods
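
// startCoordinator dials the coordinator's public gRPC endpoint (using aTLS with a mock
// validator), generates a throwaway admin WireGuard key, and calls ActivateAsCoordinator
// with the given node IPs, draining the response stream until EOF.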
func startCoordinator(ctx context.Context, coordinatorAddr string, ips []string) error {
	tlsConfig, err := atls.CreateAttestationClientTLSConfig(nil, []atls.Validator{&core.MockValidator{}})
	if err != nil {
		return err
	}

	conn, err := grpc.DialContext(ctx, net.JoinHostPort(coordinatorAddr, publicgRPCPort), grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)))
	if err != nil {
		return err
	}
	defer conn.Close()

	client := pubproto.NewAPIClient(conn)
	adminKey, err := wgtypes.GeneratePrivateKey()
	if err != nil {
		return err
	}
	adminKey = adminKey.PublicKey()

	stream, err := client.ActivateAsCoordinator(ctx, &pubproto.ActivateAsCoordinatorRequest{
		AdminVpnPubKey:     adminKey[:],
		NodePublicIps:      ips,
		MasterSecret:       []byte(masterSecret),
		KmsUri:             kms.ClusterKMSURI,
		StorageUri:         kms.NoStoreURI,
		KeyEncryptionKeyId: "",
		UseExistingKek:     false,
	})
	if err != nil {
		return err
	}

	for {
		_, err := stream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
	}
}
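
// createTempDir wipes and recreates the local log directory used to collect container logs.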
func createTempDir() error {
	if err := os.RemoveAll(localLogDirectory); err != nil && !os.IsNotExist(err) {
		return err
	}
	return os.MkdirAll(localLogDirectory, 0o755)
}
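
// addNewCoordinatorToCoordinator instructs the coordinator at oldCoordinatorAddr to
// activate another coordinator instance at newCoordinatorAddr.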
func addNewCoordinatorToCoordinator(ctx context.Context, newCoordinatorAddr, oldCoordinatorAddr string) error {
	tlsConfig, err := atls.CreateAttestationClientTLSConfig(nil, []atls.Validator{&core.MockValidator{}})
	if err != nil {
		return err
	}

	conn, err := grpc.DialContext(ctx, net.JoinHostPort(oldCoordinatorAddr, publicgRPCPort), grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)))
	if err != nil {
		return err
	}
	defer conn.Close()

	client := pubproto.NewAPIClient(conn)

	_, err = client.ActivateAdditionalCoordinator(ctx, &pubproto.ActivateAdditionalCoordinatorRequest{
		CoordinatorPublicIp: newCoordinatorAddr,
	})
	return err
}
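
// addNewNodesToCoordinator asks a running coordinator to activate the given node IPs
// via ActivateAdditionalNodes, draining the response stream until EOF.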
func addNewNodesToCoordinator(ctx context.Context, coordinatorAddr string, ips []string) error {
	tlsConfig, err := atls.CreateAttestationClientTLSConfig(nil, []atls.Validator{&core.MockValidator{}})
	if err != nil {
		return err
	}

	conn, err := grpc.DialContext(ctx, net.JoinHostPort(coordinatorAddr, publicgRPCPort), grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)))
	if err != nil {
		return err
	}
	defer conn.Close()

	client := pubproto.NewAPIClient(conn)

	stream, err := client.ActivateAdditionalNodes(ctx, &pubproto.ActivateAdditionalNodesRequest{NodePublicIps: ips})
	if err != nil {
		return err
	}
	for {
		_, err := stream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
	}
}
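
// spawnContainers starts 'count' node containers, attaches their logs to files,
// registers them in activeContainers, and returns their Docker IPs once all of
// them answer on the public gRPC port.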
func spawnContainers(ctx context.Context, cli *client.Client, count int, activeContainers map[string]peerInfo) ([]string, error) {
	tmpPeerIPs := make([]string, 0, count)
	// spawn client container(s) and obtain their docker network ip address
	for i := 0; i < count; i++ {
		resp, err := createNewNode(ctx, cli)
		if err != nil {
			return nil, err
		}
		attachDockerContainerStdoutStderrToFile(ctx, cli, resp.containerResponse.ID, role.Node)
		tmpPeerIPs = append(tmpPeerIPs, resp.dockerIPAddr)
		containerData, err := cli.ContainerInspect(ctx, resp.containerResponse.ID)
		if err != nil {
			return nil, err
		}
		activeContainers[containerData.NetworkSettings.DefaultNetworkSettings.IPAddress] = peerInfo{dockerData: resp.containerResponse, isCoordinator: false}
	}
	return tmpPeerIPs, blockUntilUp(ctx, tmpPeerIPs)
}
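
// createCoordinatorContainer creates and starts a named coordinator container,
// attaches its logs to a file, records it as an active coordinator, and connects
// it to the etcd overlay network.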
func createCoordinatorContainer(ctx context.Context, cli *client.Client, name, dockerNetworkID string, activePeers map[string]peerInfo) error {
	resp, err := cli.ContainerCreate(ctx, configConstellationPeer, hostconfigConstellationPeer, nil, nil, name)
	if err != nil {
		return err
	}
	if err := cli.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}); err != nil {
		return err
	}
	attachDockerContainerStdoutStderrToFile(ctx, cli, resp.ID, role.Coordinator)
	coordinatorData, err := cli.ContainerInspect(ctx, resp.ID)
	if err != nil {
		return err
	}
	activePeers[coordinatorData.NetworkSettings.DefaultNetworkSettings.IPAddress] = peerInfo{dockerData: resp, isCoordinator: true}
	activeCoordinators = append(activeCoordinators, coordinatorData.NetworkSettings.DefaultNetworkSettings.IPAddress)
	return cli.NetworkConnect(ctx, dockerNetworkID, resp.ID, nil)
}

// makeBinding creates the port-forwarding binding, so we can access the coordinator from the host.
func makeBinding(ip, internalPort, externalPort string) nat.PortMap {
	binding := nat.PortBinding{
		HostIP:   ip,
		HostPort: externalPort,
	}
	bindingMap := map[nat.Port][]nat.PortBinding{nat.Port(fmt.Sprintf("%s/tcp", internalPort)): {binding}}
	return nat.PortMap(bindingMap)
}
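
// Hypothetical usage sketch: makeBinding is currently unused in this test, but its
// result could be assigned to a container.HostConfig to expose the coordinator's
// public port on the host, e.g.:
//
//	hostcfg := &container.HostConfig{
//		PortBindings: makeBinding("127.0.0.1", publicgRPCPort, "9000"),
//	}

// killDockerContainers force-kills every container tracked in activeContainers.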
func killDockerContainers(ctx context.Context, cli *client.Client, activeContainers map[string]peerInfo) {
	for _, v := range activeContainers {
		killDockerContainer(ctx, cli, v.dockerData)
	}
}
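
// killDockerContainer sends signal 9 (SIGKILL) to a single container and logs the outcome.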
func killDockerContainer(ctx context.Context, cli *client.Client, container container.ContainerCreateCreatedBody) {
	fmt.Print("Kill container ", container.ID[:10], "... ")
	if err := cli.ContainerKill(ctx, container.ID, "9"); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("Success")
}
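
// attachDockerContainerStdoutStderrToFile streams a container's combined output into
// a per-role log file under localLogDirectory ("node-<n>" or "coordinator-<n>").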
func attachDockerContainerStdoutStderrToFile(ctx context.Context, cli *client.Client, id string, peerRole role.Role) {
	resp, err := cli.ContainerLogs(ctx, id, containerLogConfig)
	if err != nil {
		panic(err)
	}
	var file *os.File
	switch peerRole {
	case role.Node:
		file, err = os.Create(fmt.Sprintf("%s/node-%d", localLogDirectory, nodeCounter))
		nodeCounter++
	case role.Coordinator:
		file, err = os.Create(fmt.Sprintf("%s/coordinator-%d", localLogDirectory, coordinatorCounter))
		coordinatorCounter++
	default:
		panic("invalid role")
	}
	if err != nil {
		panic(err)
	}
	go io.Copy(file, resp) // TODO: this goroutine leaks
}
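
// imageBuild builds the Constellation Docker image from the repository root and
// blocks until the build output has been fully consumed, i.e. the build finished.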
func imageBuild(ctx context.Context, dockerClient *client.Client) error {
	// Docker needs a build context; generate it...
	tar, err := archive.TarWithOptions(".", &archive.TarOptions{})
	if err != nil {
		return err
	}

	resp, err := dockerClient.ImageBuild(ctx, tar, constellationDockerImageBuildOptions)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if _, err := io.Copy(os.Stdout, resp.Body); err != nil {
		return err
	}
	// Block until EOF, so the build has finished before we continue.
	_, err = io.Copy(io.Discard, resp.Body)

	return err
}

// countPeersTest counts the WireGuard peers inside each active docker container
// and asserts that every container sees the expected number of peers.
func countPeersTest(ctx context.Context, t *testing.T, cli *client.Client, execConfig types.ExecConfig, activeContainers map[string]peerInfo) {
	t.Run("countPeerTest", func(t *testing.T) {
		assert := assert.New(t)
		require := require.New(t)
		for ip, id := range activeContainers {
			respIDExecCreate, err := cli.ContainerExecCreate(ctx, id.dockerData.ID, execConfig)
			require.NoError(err)
			respID, err := cli.ContainerExecAttach(ctx, respIDExecCreate.ID, types.ExecStartCheck{})
			require.NoError(err)
			output, err := io.ReadAll(respID.Reader)
			require.NoError(err)
			respID.Close()
			countedPeers := strings.Count(string(output), "peer")
			fmt.Printf("% 3d peers in container %s [%s] out of % 3d total nodes \n", countedPeers, id.dockerData.ID, ip, len(activeContainers))

			assert.Equal(len(activeContainers), countedPeers)
		}
	})
}
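
// pingTest pings every peer VPN IP from every active container and asserts that
// each ping succeeds, verifying full VPN connectivity.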
func pingTest(ctx context.Context, t *testing.T, cli *client.Client, execConfig types.ExecConfig, activeContainers map[string]peerInfo, etcdstore store.Store) {
	t.Run("pingTest", func(t *testing.T) {
		assert := assert.New(t)
		require := require.New(t)
		peerVPNIPsWithoutAdmins, err := getPeerVPNIPsFromEtcd(etcdstore)
		require.NoError(err)
		// all nodes + coordinators are peers
		require.Equal(len(peerVPNIPsWithoutAdmins), len(activeContainers))

		for i := 0; i < len(peerVPNIPsWithoutAdmins); i++ {
			execConfig.Cmd = []string{"ping", "-q", "-c", "1", "-W", "1", peerVPNIPsWithoutAdmins[i]}
			for _, id := range activeContainers {
				fmt.Printf("Ping from container %v | % 19s to container % 19s", id.dockerData.ID, id.vpnIP, peerVPNIPsWithoutAdmins[i])

				respIDExecCreate, err := cli.ContainerExecCreate(ctx, id.dockerData.ID, execConfig)
				require.NoError(err)

				err = cli.ContainerExecStart(ctx, respIDExecCreate.ID, types.ExecStartCheck{})
				require.NoError(err)

				resp, err := cli.ContainerExecInspect(ctx, respIDExecCreate.ID)
				require.NoError(err)
				assert.Equal(0, resp.ExitCode)
				if resp.ExitCode == 0 {
					fmt.Printf(" ...Success\n")
				} else {
					fmt.Printf(" ...Failure\n")
				}
			}
		}
	})
}
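
// newNodeData pairs a freshly created node container with its Docker network IP.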
type newNodeData struct {
	containerResponse container.ContainerCreateCreatedBody
	dockerIPAddr      string
}

// createNewNode creates and starts an unnamed node container, passing any error up to the caller.
func createNewNode(ctx context.Context, cli *client.Client) (*newNodeData, error) {
	resp, err := cli.ContainerCreate(ctx, configConstellationPeer, hostconfigConstellationPeer, nil, nil, "")
	if err != nil {
		return nil, err
	}

	if err := cli.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}); err != nil {
		return nil, err
	}
	containerData, err := cli.ContainerInspect(ctx, resp.ID)
	if err != nil {
		return nil, err
	}
	fmt.Printf("created Node %v\n", containerData.ID)
	return &newNodeData{resp, containerData.NetworkSettings.IPAddress}, nil
}
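
// awaitPeerResponse blocks until a TLS/gRPC connection to the peer's public port can be
// established (or a 10 second timeout expires), then closes the connection.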
func awaitPeerResponse(ctx context.Context, ip string, tlsConfig *tls.Config) error {
	// Block, so the connection gets established/fails immediately
	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()
	conn, err := grpc.DialContext(ctx, net.JoinHostPort(ip, publicgRPCPort), grpc.WithBlock(), grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)))
	if err != nil {
		return err
	}
	return conn.Close()
}
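
// blockUntilUp waits until every given peer answers on its public gRPC port.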
func blockUntilUp(ctx context.Context, peerIPs []string) error {
	tlsConfig, err := atls.CreateAttestationClientTLSConfig(nil, []atls.Validator{&core.MockValidator{}})
	if err != nil {
		return err
	}
	for _, ip := range peerIPs {
		// Block, so the connection gets established/fails immediately
		if err := awaitPeerResponse(ctx, ip, tlsConfig); err != nil {
			return err
		}
	}
	return nil
}
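
// getPeerVPNIPsFromEtcd returns the VPN IPs of all peers stored in etcd, excluding admins.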
func getPeerVPNIPsFromEtcd(etcdstore store.Store) ([]string, error) {
	peers, err := storewrapper.StoreWrapper{Store: etcdstore}.GetPeers()
	if err != nil {
		return nil, err
	}

	vpnIPS := make([]string, 0, len(peers))

	for _, peer := range peers {
		if peer.Role != role.Admin {
			vpnIPS = append(vpnIPS, peer.VPNIP)
		}
	}
	return vpnIPS, nil
}
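
// translatePublicToVPNIP looks up the VPN IP that belongs to the given public IP in etcd.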
func translatePublicToVPNIP(publicIP string, etcdstore store.Store) (string, error) {
	peers, err := storewrapper.StoreWrapper{Store: etcdstore}.GetPeers()
	if err != nil {
		return "", err
	}
	for _, peer := range peers {
		if peer.PublicIP == publicIP {
			return peer.VPNIP, nil
		}
	}
	return "", errors.New("did not find VPN IP")
}
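
// updateVPNIPs refreshes the vpnIP field of every tracked container from etcd.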
func updateVPNIPs(activeContainers map[string]peerInfo, etcdstore store.Store) error {
	for publicIP, v := range activeContainers {
		vpnIP, err := translatePublicToVPNIP(publicIP, etcdstore)
		if err != nil {
			return err
		}
		v.vpnIP = vpnIP
		activeContainers[publicIP] = v
	}
	return nil
}