Rename coordinator to bootstrapper and rename roles

Author: katexochen, 2022-06-29 15:26:29 +02:00 (committed by Paul Meyer)
parent 3280ed200c
commit 916e5d6b55
191 changed files with 1763 additions and 2030 deletions


@@ -0,0 +1,230 @@
package kubernetes
import (
"context"
"github.com/edgelesssys/constellation/bootstrapper/internal/kubernetes/k8sapi/resources"
"github.com/edgelesssys/constellation/internal/cloud/metadata"
k8s "k8s.io/api/core/v1"
)
// ProviderMetadata implementers read/write cloud provider metadata.
type ProviderMetadata interface {
// List retrieves all instances belonging to the current Constellation.
List(ctx context.Context) ([]metadata.InstanceMetadata, error)
// Self retrieves the current instance.
Self(ctx context.Context) (metadata.InstanceMetadata, error)
// GetSubnetworkCIDR retrieves the subnetwork CIDR for the current instance.
GetSubnetworkCIDR(ctx context.Context) (string, error)
// SupportsLoadBalancer returns true if the cloud provider supports load balancers.
SupportsLoadBalancer() bool
// GetLoadBalancerIP retrieves the load balancer IP.
GetLoadBalancerIP(ctx context.Context) (string, error)
// GetInstance retrieves an instance using its providerID.
GetInstance(ctx context.Context, providerID string) (metadata.InstanceMetadata, error)
// Supported is used to determine if metadata API is implemented for this cloud provider.
Supported() bool
}
// CloudControllerManager implementers provide configuration for the k8s cloud-controller-manager.
type CloudControllerManager interface {
// Image returns the container image used to provide cloud-controller-manager for the cloud-provider.
Image() string
// Path returns the path used by cloud-controller-manager executable within the container image.
Path() string
// Name returns the cloud-provider name as used by k8s cloud-controller-manager (k8s.gcr.io/cloud-controller-manager).
Name() string
// ExtraArgs returns a list of arguments to append to the cloud-controller-manager command.
ExtraArgs() []string
// ConfigMaps returns a list of ConfigMaps to deploy together with the k8s cloud-controller-manager
// Reference: https://kubernetes.io/docs/concepts/configuration/configmap/ .
ConfigMaps(instance metadata.InstanceMetadata) (resources.ConfigMaps, error)
// Secrets returns a list of secrets to deploy together with the k8s cloud-controller-manager.
// Reference: https://kubernetes.io/docs/concepts/configuration/secret/ .
Secrets(ctx context.Context, providerID, cloudServiceAccountURI string) (resources.Secrets, error)
// Volumes returns a list of volumes to deploy together with the k8s cloud-controller-manager.
// Reference: https://kubernetes.io/docs/concepts/storage/volumes/ .
Volumes() []k8s.Volume
// VolumeMounts returns a list of volume mounts to deploy together with the k8s cloud-controller-manager.
VolumeMounts() []k8s.VolumeMount
// Env returns a list of k8s environment key-value pairs to deploy together with the k8s cloud-controller-manager.
Env() []k8s.EnvVar
// Supported is used to determine if cloud controller manager is implemented for this cloud provider.
Supported() bool
}
// CloudNodeManager implementers provide configuration for the k8s cloud-node-manager.
type CloudNodeManager interface {
// Image returns the container image used to provide cloud-node-manager for the cloud-provider.
Image() string
// Path returns the path used by cloud-node-manager executable within the container image.
Path() string
// ExtraArgs returns a list of arguments to append to the cloud-node-manager command.
ExtraArgs() []string
// Supported is used to determine if cloud node manager is implemented for this cloud provider.
Supported() bool
}
// ClusterAutoscaler implementers provide configuration for the k8s cluster-autoscaler.
type ClusterAutoscaler interface {
// Name returns the cloud-provider name as used by k8s cluster-autoscaler.
Name() string
// Secrets returns a list of secrets to deploy together with the k8s cluster-autoscaler.
Secrets(providerID, cloudServiceAccountURI string) (resources.Secrets, error)
// Volumes returns a list of volumes to deploy together with the k8s cluster-autoscaler.
Volumes() []k8s.Volume
// VolumeMounts returns a list of volume mounts to deploy together with the k8s cluster-autoscaler.
VolumeMounts() []k8s.VolumeMount
// Env returns a list of k8s environment key-value pairs to deploy together with the k8s cluster-autoscaler.
Env() []k8s.EnvVar
// Supported is used to determine if cluster autoscaler is implemented for this cloud provider.
Supported() bool
}
type stubProviderMetadata struct {
GetLoadBalancerIPErr error
GetLoadBalancerIPResp string
GetSubnetworkCIDRErr error
GetSubnetworkCIDRResp string
ListErr error
ListResp []metadata.InstanceMetadata
SelfErr error
SelfResp metadata.InstanceMetadata
GetInstanceErr error
GetInstanceResp metadata.InstanceMetadata
SupportedResp bool
SupportsLoadBalancerResp bool
}
func (m *stubProviderMetadata) GetLoadBalancerIP(ctx context.Context) (string, error) {
return m.GetLoadBalancerIPResp, m.GetLoadBalancerIPErr
}
func (m *stubProviderMetadata) GetSubnetworkCIDR(ctx context.Context) (string, error) {
return m.GetSubnetworkCIDRResp, m.GetSubnetworkCIDRErr
}
func (m *stubProviderMetadata) List(ctx context.Context) ([]metadata.InstanceMetadata, error) {
return m.ListResp, m.ListErr
}
func (m *stubProviderMetadata) Self(ctx context.Context) (metadata.InstanceMetadata, error) {
return m.SelfResp, m.SelfErr
}
func (m *stubProviderMetadata) GetInstance(ctx context.Context, providerID string) (metadata.InstanceMetadata, error) {
return m.GetInstanceResp, m.GetInstanceErr
}
func (m *stubProviderMetadata) Supported() bool {
return m.SupportedResp
}
func (m *stubProviderMetadata) SupportsLoadBalancer() bool {
return m.SupportsLoadBalancerResp
}
type stubCloudControllerManager struct {
SupportedResp bool
}
func (m *stubCloudControllerManager) Image() string {
return "stub-image:latest"
}
func (m *stubCloudControllerManager) Path() string {
return "/stub-controller-manager"
}
func (m *stubCloudControllerManager) Name() string {
return "stub"
}
func (m *stubCloudControllerManager) ExtraArgs() []string {
return []string{}
}
func (m *stubCloudControllerManager) ConfigMaps(instance metadata.InstanceMetadata) (resources.ConfigMaps, error) {
return []*k8s.ConfigMap{}, nil
}
func (m *stubCloudControllerManager) Secrets(ctx context.Context, instance, cloudServiceAccountURI string) (resources.Secrets, error) {
return []*k8s.Secret{}, nil
}
func (m *stubCloudControllerManager) Volumes() []k8s.Volume {
return []k8s.Volume{}
}
func (m *stubCloudControllerManager) VolumeMounts() []k8s.VolumeMount {
return []k8s.VolumeMount{}
}
func (m *stubCloudControllerManager) Env() []k8s.EnvVar {
return []k8s.EnvVar{}
}
func (m *stubCloudControllerManager) Supported() bool {
return m.SupportedResp
}
type stubCloudNodeManager struct {
SupportedResp bool
ImageResp string
PathResp string
ExtraArgsResp []string
}
func (m *stubCloudNodeManager) Image() string {
return m.ImageResp
}
func (m *stubCloudNodeManager) Path() string {
return m.PathResp
}
func (m *stubCloudNodeManager) ExtraArgs() []string {
return m.ExtraArgsResp
}
func (m *stubCloudNodeManager) Supported() bool {
return m.SupportedResp
}
type stubClusterAutoscaler struct {
SupportedResp bool
}
func (a *stubClusterAutoscaler) Name() string {
return "stub"
}
// Secrets returns a list of secrets to deploy together with the k8s cluster-autoscaler.
func (a *stubClusterAutoscaler) Secrets(instance, cloudServiceAccountURI string) (resources.Secrets, error) {
return resources.Secrets{}, nil
}
// Volumes returns a list of volumes to deploy together with the k8s cluster-autoscaler.
func (a *stubClusterAutoscaler) Volumes() []k8s.Volume {
return []k8s.Volume{}
}
// VolumeMounts returns a list of volume mounts to deploy together with the k8s cluster-autoscaler.
func (a *stubClusterAutoscaler) VolumeMounts() []k8s.VolumeMount {
return []k8s.VolumeMount{}
}
// Env returns a list of k8s environment key-value pairs to deploy together with the k8s cluster-autoscaler.
func (a *stubClusterAutoscaler) Env() []k8s.EnvVar {
return []k8s.EnvVar{}
}
func (a *stubClusterAutoscaler) Supported() bool {
return a.SupportedResp
}
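
The stub types above exist so that dependent packages can unit-test against these interfaces without a real cloud API. A minimal sketch of how a test might wire a stub (the test itself is illustrative and not part of this commit; it needs the context, errors, and testing imports):

func TestStubSatisfiesProviderMetadata(t *testing.T) {
	stub := &stubProviderMetadata{
		SelfErr:       errors.New("boom"),
		SupportedResp: true,
	}
	var m ProviderMetadata = stub // compile-time check that the stub satisfies the interface
	if _, err := m.Self(context.Background()); err == nil {
		t.Fatal("expected the stubbed error")
	}
	if !m.Supported() {
		t.Fatal("expected Supported to report true")
	}
}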


@@ -0,0 +1,216 @@
package k8sapi
import (
"archive/tar"
"compress/gzip"
"context"
"errors"
"fmt"
"io"
"io/fs"
"net/http"
"os"
"path"
"github.com/spf13/afero"
"golang.org/x/text/transform"
)
// osInstaller installs binary components of supported Kubernetes versions.
type osInstaller struct {
fs *afero.Afero
hClient httpClient
}
// newOSInstaller creates a new osInstaller.
func newOSInstaller() *osInstaller {
return &osInstaller{
fs: &afero.Afero{Fs: afero.NewOsFs()},
hClient: &http.Client{},
}
}
// Install downloads a resource from a URL, applies any given text transformations and extracts the resulting file if required.
// The resulting file(s) are copied to all destinations.
func (i *osInstaller) Install(
ctx context.Context, sourceURL string, destinations []string, perm fs.FileMode,
extract bool, transforms ...transform.Transformer,
) error {
tempPath, err := i.downloadToTempDir(ctx, sourceURL, transforms...)
if err != nil {
return err
}
defer func() {
_ = i.fs.Remove(tempPath)
}()
for _, destination := range destinations {
var err error
if extract {
err = i.extractArchive(tempPath, destination, perm)
} else {
err = i.copy(tempPath, destination, perm)
}
if err != nil {
return fmt.Errorf("installing from %q: copying to destination %q: %w", sourceURL, destination, err)
}
}
return nil
}
// extractArchive extracts tar gz archives to a prefixed destination.
func (i *osInstaller) extractArchive(archivePath, prefix string, perm fs.FileMode) error {
archiveFile, err := i.fs.Open(archivePath)
if err != nil {
return fmt.Errorf("opening archive file: %w", err)
}
defer archiveFile.Close()
gzReader, err := gzip.NewReader(archiveFile)
if err != nil {
return fmt.Errorf("reading archive file as gzip: %w", err)
}
defer gzReader.Close()
if err := i.fs.MkdirAll(prefix, fs.ModePerm); err != nil {
return fmt.Errorf("creating prefix folder: %w", err)
}
tarReader := tar.NewReader(gzReader)
for {
header, err := tarReader.Next()
if err == io.EOF {
return nil
}
if err != nil {
return fmt.Errorf("parsing tar header: %w", err)
}
if err := verifyTarPath(header.Name); err != nil {
return fmt.Errorf("verifying tar path %q: %w", header.Name, err)
}
switch header.Typeflag {
case tar.TypeDir:
if len(header.Name) == 0 {
return errors.New("cannot create dir for empty path")
}
prefixedPath := path.Join(prefix, header.Name)
if err := i.fs.Mkdir(prefixedPath, fs.FileMode(header.Mode)&perm); err != nil && !errors.Is(err, os.ErrExist) {
return fmt.Errorf("creating folder %q: %w", prefixedPath, err)
}
case tar.TypeReg:
if len(header.Name) == 0 {
return errors.New("cannot create file for empty path")
}
prefixedPath := path.Join(prefix, header.Name)
out, err := i.fs.OpenFile(prefixedPath, os.O_WRONLY|os.O_CREATE, fs.FileMode(header.Mode))
if err != nil {
return fmt.Errorf("creating file %q for writing: %w", prefixedPath, err)
}
defer out.Close()
if _, err := io.Copy(out, tarReader); err != nil {
return fmt.Errorf("writing extracted file contents: %w", err)
}
case tar.TypeSymlink:
if err := verifyTarPath(header.Linkname); err != nil {
return fmt.Errorf("invalid tar path %q: %w", header.Linkname, err)
}
if len(header.Name) == 0 {
return errors.New("cannot symlink file for empty oldname")
}
if len(header.Linkname) == 0 {
return errors.New("cannot symlink file for empty newname")
}
if symlinker, ok := i.fs.Fs.(afero.Symlinker); ok {
if err := symlinker.SymlinkIfPossible(path.Join(prefix, header.Name), path.Join(prefix, header.Linkname)); err != nil {
return fmt.Errorf("creating symlink: %w", err)
}
} else {
return errors.New("fs does not support symlinks")
}
default:
return fmt.Errorf("unsupported tar record: %v", header.Typeflag)
}
}
}
// downloadToTempDir downloads a file to a temporary location, applying transform on-the-fly.
func (i *osInstaller) downloadToTempDir(ctx context.Context, url string, transforms ...transform.Transformer) (string, error) {
out, err := afero.TempFile(i.fs, "", "")
if err != nil {
return "", fmt.Errorf("creating destination temp file: %w", err)
}
defer out.Close()
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
if err != nil {
return "", fmt.Errorf("request to download %q: %w", url, err)
}
resp, err := i.hClient.Do(req)
if err != nil {
return "", fmt.Errorf("request to download %q: %w", url, err)
}
if resp.StatusCode != http.StatusOK {
return "", fmt.Errorf("request to download %q failed with status code: %v", url, resp.Status)
}
defer resp.Body.Close()
transformReader := transform.NewReader(resp.Body, transform.Chain(transforms...))
if _, err = io.Copy(out, transformReader); err != nil {
return "", fmt.Errorf("downloading %q: %w", url, err)
}
return out.Name(), nil
}
// copy copies a file from oldname to newname.
func (i *osInstaller) copy(oldname, newname string, perm fs.FileMode) (err error) {
old, openOldErr := i.fs.OpenFile(oldname, os.O_RDONLY, fs.ModePerm)
if openOldErr != nil {
return fmt.Errorf("copying %q to %q: cannot open source file for reading: %w", oldname, newname, openOldErr)
}
defer func() { _ = old.Close() }()
// create the destination path if it does not exist
if err := i.fs.MkdirAll(path.Dir(newname), fs.ModePerm); err != nil {
return fmt.Errorf("copying %q to %q: unable to create destination folder: %w", oldname, newname, err)
}
new, openNewErr := i.fs.OpenFile(newname, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, perm)
if openNewErr != nil {
return fmt.Errorf("copying %q to %q: cannot open destination file for writing: %w", oldname, newname, openNewErr)
}
defer func() {
_ = new.Close()
if err != nil {
_ = i.fs.Remove(newname)
}
}()
if _, err := io.Copy(new, old); err != nil {
return fmt.Errorf("copying %q to %q: copying file contents: %w", oldname, newname, err)
}
return nil
}
// verifyTarPath checks if a tar path is valid (must not contain ".." as path element).
func verifyTarPath(pat string) error {
n := len(pat)
r := 0
for r < n {
switch {
case os.IsPathSeparator(pat[r]):
// empty path element
r++
case pat[r] == '.' && (r+1 == n || os.IsPathSeparator(pat[r+1])):
// . element
r++
case pat[r] == '.' && pat[r+1] == '.' && (r+2 == n || os.IsPathSeparator(pat[r+2])):
// .. element
return errors.New("path contains \"..\"")
default:
// skip to next path element
for r < n && !os.IsPathSeparator(pat[r]) {
r++
}
}
}
return nil
}
type httpClient interface {
Do(req *http.Request) (*http.Response, error)
}
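
Install ties the helpers above together: download to a temp file, optionally rewrite embedded strings on the fly, then copy or extract to each destination. A hedged usage sketch (the URL and destination are placeholders; replace.String comes from github.com/icholy/replace, which the tests below also use):

// installKubelet is illustrative only; the URL and paths are placeholders.
func installKubelet(ctx context.Context) error {
	inst := newOSInstaller()
	return inst.Install(
		ctx,
		"https://example.com/release/kubelet", // placeholder download URL
		[]string{"/run/state/bin/kubelet"},    // placeholder destination
		0o755,
		false, // plain binary, no tar.gz extraction
		replace.String("/usr/bin", "/run/state/bin"), // rewrite embedded paths while downloading
	)
}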


@@ -0,0 +1,632 @@
package k8sapi
import (
"archive/tar"
"bufio"
"bytes"
"compress/gzip"
"context"
"io"
"io/fs"
"net"
"net/http"
"net/http/httptest"
"path"
"testing"
"github.com/hashicorp/go-multierror"
"github.com/icholy/replace"
"github.com/spf13/afero"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/text/transform"
"google.golang.org/grpc/test/bufconn"
)
func TestInstall(t *testing.T) {
testCases := map[string]struct {
server httpBufconnServer
destination string
extract bool
transforms []transform.Transformer
readonly bool
wantErr bool
wantFiles map[string][]byte
}{
"download works": {
server: newHTTPBufconnServerWithBody([]byte("file-contents")),
destination: "/destination",
wantFiles: map[string][]byte{"/destination": []byte("file-contents")},
},
"download with extract works": {
server: newHTTPBufconnServerWithBody(createTarGz([]byte("file-contents"), "/destination")),
destination: "/prefix",
extract: true,
wantFiles: map[string][]byte{"/prefix/destination": []byte("file-contents")},
},
"download with transform works": {
server: newHTTPBufconnServerWithBody([]byte("/usr/bin/kubelet")),
destination: "/destination",
transforms: []transform.Transformer{
replace.String("/usr/bin", "/run/state/bin"),
},
wantFiles: map[string][]byte{"/destination": []byte("/run/state/bin/kubelet")},
},
"download fails": {
server: newHTTPBufconnServer(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(500) }),
destination: "/destination",
wantErr: true,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
defer tc.server.Close()
hClient := http.Client{
Transport: &http.Transport{
DialContext: tc.server.DialContext,
Dial: tc.server.Dial,
DialTLSContext: tc.server.DialContext,
DialTLS: tc.server.Dial,
},
}
inst := osInstaller{
fs: &afero.Afero{Fs: afero.NewMemMapFs()},
hClient: &hClient,
}
err := inst.Install(context.Background(), "http://server/path", []string{tc.destination}, fs.ModePerm, tc.extract, tc.transforms...)
if tc.wantErr {
assert.Error(err)
return
}
require.NoError(err)
for path, wantContents := range tc.wantFiles {
contents, err := inst.fs.ReadFile(path)
assert.NoError(err)
assert.Equal(wantContents, contents)
}
})
}
}
func TestExtractArchive(t *testing.T) {
tarGzTestFile := createTarGz([]byte("file-contents"), "/destination")
tarGzTestWithFolder := createTarGzWithFolder([]byte("file-contents"), "/folder/destination", nil)
testCases := map[string]struct {
source string
destination string
contents []byte
readonly bool
wantErr bool
wantFiles map[string][]byte
}{
"extract works": {
source: "in.tar.gz",
destination: "/prefix",
contents: tarGzTestFile,
wantFiles: map[string][]byte{
"/prefix/destination": []byte("file-contents"),
},
},
"extract with folder works": {
source: "in.tar.gz",
destination: "/prefix",
contents: tarGzTestWithFolder,
wantFiles: map[string][]byte{
"/prefix/folder/destination": []byte("file-contents"),
},
},
"source missing": {
source: "in.tar.gz",
destination: "/prefix",
wantErr: true,
},
"non-gzip file contents": {
source: "in.tar.gz",
contents: []byte("invalid bytes"),
destination: "/prefix",
wantErr: true,
},
"non-tar file contents": {
source: "in.tar.gz",
contents: createGz([]byte("file-contents")),
destination: "/prefix",
wantErr: true,
},
"mkdir prefix dir fails on RO fs": {
source: "in.tar.gz",
contents: tarGzTestFile,
destination: "/prefix",
readonly: true,
wantErr: true,
},
"mkdir tar dir fails on RO fs": {
source: "in.tar.gz",
contents: tarGzTestWithFolder,
destination: "/",
readonly: true,
wantErr: true,
},
"writing tar file fails on RO fs": {
source: "in.tar.gz",
contents: tarGzTestFile,
destination: "/",
readonly: true,
wantErr: true,
},
"symlink can be detected (but is unsupported on memmapfs)": {
source: "in.tar.gz",
contents: createTarGzWithSymlink("source", "dest"),
destination: "/prefix",
wantErr: true,
},
"unsupported tar header type is detected": {
source: "in.tar.gz",
contents: createTarGzWithFifo("/destination"),
destination: "/prefix",
wantErr: true,
},
"path traversal is detected": {
source: "in.tar.gz",
contents: createTarGz([]byte{}, "../destination"),
wantErr: true,
},
"path traversal in symlink is detected": {
source: "in.tar.gz",
contents: createTarGzWithSymlink("/source", "../destination"),
wantErr: true,
},
"empty file name is detected": {
source: "in.tar.gz",
contents: createTarGz([]byte{}, ""),
wantErr: true,
},
"empty folder name is detected": {
source: "in.tar.gz",
contents: createTarGzWithFolder([]byte{}, "source", stringPtr("")),
wantErr: true,
},
"empty symlink source is detected": {
source: "in.tar.gz",
contents: createTarGzWithSymlink("", "/target"),
wantErr: true,
},
"empty symlink target is detected": {
source: "in.tar.gz",
contents: createTarGzWithSymlink("/source", ""),
wantErr: true,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
afs := afero.NewMemMapFs()
if len(tc.source) > 0 && len(tc.contents) > 0 {
require.NoError(afero.WriteFile(afs, tc.source, tc.contents, fs.ModePerm))
}
if tc.readonly {
afs = afero.NewReadOnlyFs(afs)
}
inst := osInstaller{
fs: &afero.Afero{Fs: afs},
}
err := inst.extractArchive(tc.source, tc.destination, fs.ModePerm)
if tc.wantErr {
assert.Error(err)
return
}
require.NoError(err)
for path, wantContents := range tc.wantFiles {
contents, err := inst.fs.ReadFile(path)
assert.NoError(err)
assert.Equal(wantContents, contents)
}
})
}
}
func TestDownloadToTempDir(t *testing.T) {
testCases := map[string]struct {
server httpBufconnServer
transforms []transform.Transformer
readonly bool
wantErr bool
wantFile []byte
}{
"download works": {
server: newHTTPBufconnServerWithBody([]byte("file-contents")),
wantFile: []byte("file-contents"),
},
"download with transform works": {
server: newHTTPBufconnServerWithBody([]byte("/usr/bin/kubelet")),
transforms: []transform.Transformer{
replace.String("/usr/bin", "/run/state/bin"),
},
wantFile: []byte("/run/state/bin/kubelet"),
},
"download fails": {
server: newHTTPBufconnServer(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(500) }),
wantErr: true,
},
"creating temp file fails on RO fs": {
server: newHTTPBufconnServerWithBody([]byte("file-contents")),
readonly: true,
wantErr: true,
},
"content length mismatch": {
server: newHTTPBufconnServer(func(writer http.ResponseWriter, request *http.Request) {
writer.Header().Set("Content-Length", "1337")
writer.WriteHeader(200)
}),
wantErr: true,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
defer tc.server.Close()
hClient := http.Client{
Transport: &http.Transport{
DialContext: tc.server.DialContext,
Dial: tc.server.Dial,
DialTLSContext: tc.server.DialContext,
DialTLS: tc.server.Dial,
},
}
afs := afero.NewMemMapFs()
if tc.readonly {
afs = afero.NewReadOnlyFs(afs)
}
inst := osInstaller{
fs: &afero.Afero{Fs: afs},
hClient: &hClient,
}
path, err := inst.downloadToTempDir(context.Background(), "http://server/path", tc.transforms...)
if tc.wantErr {
assert.Error(err)
return
}
require.NoError(err)
contents, err := inst.fs.ReadFile(path)
assert.NoError(err)
assert.Equal(tc.wantFile, contents)
})
}
}
func TestCopy(t *testing.T) {
contents := []byte("file-contents")
existingFile := "/source"
testCases := map[string]struct {
oldname string
newname string
perm fs.FileMode
readonly bool
wantErr bool
}{
"copy works": {
oldname: existingFile,
newname: "/destination",
perm: fs.ModePerm,
},
"oldname does not exist": {
oldname: "missing",
newname: "/destination",
wantErr: true,
},
"copy on readonly fs fails": {
oldname: existingFile,
newname: "/destination",
perm: fs.ModePerm,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
afs := afero.NewMemMapFs()
require.NoError(afero.WriteFile(afs, existingFile, contents, fs.ModePerm))
if tc.readonly {
afs = afero.NewReadOnlyFs(afs)
}
inst := osInstaller{fs: &afero.Afero{Fs: afs}}
err := inst.copy(tc.oldname, tc.newname, tc.perm)
if tc.wantErr {
assert.Error(err)
return
}
require.NoError(err)
oldfile, err := afs.Open(tc.oldname)
assert.NoError(err)
newfile, err := afs.Open(tc.newname)
assert.NoError(err)
oldContents, _ := io.ReadAll(oldfile)
newContents, _ := io.ReadAll(newfile)
assert.Equal(oldContents, newContents)
newStat, _ := newfile.Stat()
assert.Equal(tc.perm, newStat.Mode())
})
}
}
func TestVerifyTarPath(t *testing.T) {
testCases := map[string]struct {
path string
wantErr bool
}{
"valid relative path": {
path: "a/b/c",
},
"valid absolute path": {
path: "/a/b/c",
},
"valid path with dot": {
path: "/a/b/.d",
},
"valid path with dots": {
path: "/a/b/..d",
},
"single dot in path is allowed": {
path: ".",
},
"simple path traversal": {
path: "..",
wantErr: true,
},
"simple path traversal 2": {
path: "../",
wantErr: true,
},
"simple path traversal 3": {
path: "/..",
wantErr: true,
},
"simple path traversal 4": {
path: "/../",
wantErr: true,
},
"complex relative path traversal": {
path: "a/b/c/../../../../c/d/e",
wantErr: true,
},
"complex absolute path traversal": {
path: "/a/b/c/../../../../c/d/e",
wantErr: true,
},
"path traversal at the end": {
path: "a/..",
wantErr: true,
},
"path traversal at the end with trailing /": {
path: "a/../",
wantErr: true,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
err := verifyTarPath(tc.path)
if tc.wantErr {
assert.Error(err)
return
}
require.NoError(err)
assert.Equal(tc.path, path.Clean(tc.path))
})
}
}
type httpBufconnServer struct {
*httptest.Server
*bufconn.Listener
}
func (s *httpBufconnServer) DialContext(ctx context.Context, network, addr string) (net.Conn, error) {
return s.Listener.DialContext(ctx)
}
func (s *httpBufconnServer) Dial(network, addr string) (net.Conn, error) {
return s.Listener.Dial()
}
func (s *httpBufconnServer) Close() {
s.Server.Close()
s.Listener.Close()
}
func newHTTPBufconnServer(handlerFunc http.HandlerFunc) httpBufconnServer {
server := httptest.NewUnstartedServer(handlerFunc)
listener := bufconn.Listen(1024)
server.Listener = listener
server.Start()
return httpBufconnServer{
Server: server,
Listener: listener,
}
}
func newHTTPBufconnServerWithBody(body []byte) httpBufconnServer {
return newHTTPBufconnServer(func(writer http.ResponseWriter, request *http.Request) {
if _, err := writer.Write(body); err != nil {
panic(err)
}
})
}
func createTarGz(contents []byte, path string) []byte {
tgzWriter := newTarGzWriter()
defer func() { _ = tgzWriter.Close() }()
if err := tgzWriter.writeHeader(&tar.Header{
Typeflag: tar.TypeReg,
Name: path,
Size: int64(len(contents)),
Mode: int64(fs.ModePerm),
}); err != nil {
panic(err)
}
if _, err := tgzWriter.writeTar(contents); err != nil {
panic(err)
}
return tgzWriter.Bytes()
}
func createTarGzWithFolder(contents []byte, pat string, dirnameOverride *string) []byte {
tgzWriter := newTarGzWriter()
defer func() { _ = tgzWriter.Close() }()
dir := path.Dir(pat)
if dirnameOverride != nil {
dir = *dirnameOverride
}
if err := tgzWriter.writeHeader(&tar.Header{
Typeflag: tar.TypeDir,
Name: dir,
Mode: int64(fs.ModePerm),
}); err != nil {
panic(err)
}
if err := tgzWriter.writeHeader(&tar.Header{
Typeflag: tar.TypeReg,
Name: pat,
Size: int64(len(contents)),
Mode: int64(fs.ModePerm),
}); err != nil {
panic(err)
}
if _, err := tgzWriter.writeTar(contents); err != nil {
panic(err)
}
return tgzWriter.Bytes()
}
func createTarGzWithSymlink(oldname, newname string) []byte {
tgzWriter := newTarGzWriter()
defer func() { _ = tgzWriter.Close() }()
if err := tgzWriter.writeHeader(&tar.Header{
Typeflag: tar.TypeSymlink,
Name: oldname,
Linkname: newname,
Mode: int64(fs.ModePerm),
}); err != nil {
panic(err)
}
return tgzWriter.Bytes()
}
func createTarGzWithFifo(name string) []byte {
tgzWriter := newTarGzWriter()
defer func() { _ = tgzWriter.Close() }()
if err := tgzWriter.writeHeader(&tar.Header{
Typeflag: tar.TypeFifo,
Name: name,
Mode: int64(fs.ModePerm),
}); err != nil {
panic(err)
}
return tgzWriter.Bytes()
}
func createGz(contents []byte) []byte {
tgzWriter := newTarGzWriter()
defer func() { _ = tgzWriter.Close() }()
if _, err := tgzWriter.writeGz(contents); err != nil {
panic(err)
}
return tgzWriter.Bytes()
}
type tarGzWriter struct {
buf *bytes.Buffer
bufWriter *bufio.Writer
gzWriter *gzip.Writer
tarWriter *tar.Writer
}
func newTarGzWriter() *tarGzWriter {
var buf bytes.Buffer
bufWriter := bufio.NewWriter(&buf)
gzipWriter := gzip.NewWriter(bufWriter)
tarWriter := tar.NewWriter(gzipWriter)
return &tarGzWriter{
buf: &buf,
bufWriter: bufWriter,
gzWriter: gzipWriter,
tarWriter: tarWriter,
}
}
func (w *tarGzWriter) writeHeader(hdr *tar.Header) error {
return w.tarWriter.WriteHeader(hdr)
}
func (w *tarGzWriter) writeTar(b []byte) (int, error) {
return w.tarWriter.Write(b)
}
func (w *tarGzWriter) writeGz(b []byte) (int, error) {
return w.gzWriter.Write(b)
}
func (w *tarGzWriter) Bytes() []byte {
_ = w.tarWriter.Flush()
_ = w.gzWriter.Flush()
_ = w.gzWriter.Close() // required to ensure clean EOF in gz reader
_ = w.bufWriter.Flush()
return w.buf.Bytes()
}
func (w *tarGzWriter) Close() (result error) {
if err := w.tarWriter.Close(); err != nil {
result = multierror.Append(result, err)
}
if err := w.gzWriter.Close(); err != nil {
result = multierror.Append(result, err)
}
return result
}
func stringPtr(s string) *string {
return &s
}


@@ -0,0 +1,49 @@
package k8sapi
import (
"flag"
"fmt"
"github.com/google/shlex"
kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
)
func ParseJoinCommand(joinCommand string) (*kubeadm.BootstrapTokenDiscovery, error) {
// Format:
// kubeadm join [API_SERVER_ENDPOINT] --token [TOKEN] --discovery-token-ca-cert-hash [DISCOVERY_TOKEN_CA_CERT_HASH] --control-plane
// split and verify that this is a kubeadm join command
argv, err := shlex.Split(joinCommand)
if err != nil {
return nil, fmt.Errorf("kubadm join command could not be tokenized: %v", joinCommand)
}
if len(argv) < 3 {
return nil, fmt.Errorf("kubadm join command is too short: %v", argv)
}
if argv[0] != "kubeadm" || argv[1] != "join" {
return nil, fmt.Errorf("not a kubeadm join command: %v", argv)
}
result := kubeadm.BootstrapTokenDiscovery{APIServerEndpoint: argv[2]}
var caCertHash string
// parse flags
flags := flag.NewFlagSet("", flag.ContinueOnError)
flags.StringVar(&result.Token, "token", "", "")
flags.StringVar(&caCertHash, "discovery-token-ca-cert-hash", "", "")
flags.Bool("control-plane", false, "")
flags.String("certificate-key", "", "")
if err := flags.Parse(argv[3:]); err != nil {
return nil, fmt.Errorf("parsing flag arguments: %v %w", argv, err)
}
if result.Token == "" {
return nil, fmt.Errorf("missing flag argument token: %v", argv)
}
if caCertHash == "" {
return nil, fmt.Errorf("missing flag argument discovery-token-ca-cert-hash: %v", argv)
}
result.CACertHashes = []string{caCertHash}
return &result, nil
}
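
A short usage sketch (the join command string is an illustrative example):

// exampleParse is illustrative only.
func exampleParse() error {
	btd, err := ParseJoinCommand("kubeadm join 192.0.2.0:6443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:1234 --control-plane")
	if err != nil {
		return err
	}
	fmt.Println(btd.APIServerEndpoint, btd.Token, btd.CACertHashes)
	return nil
}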


@@ -0,0 +1,69 @@
package k8sapi
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
)
func TestParseJoinCommand(t *testing.T) {
testCases := map[string]struct {
joinCommand string
wantJoinArgs kubeadm.BootstrapTokenDiscovery
wantErr bool
}{
"join command can be parsed": {
joinCommand: "kubeadm join 192.0.2.0:8443 --token dummy-token --discovery-token-ca-cert-hash sha512:dummy-hash --control-plane",
wantJoinArgs: kubeadm.BootstrapTokenDiscovery{
APIServerEndpoint: "192.0.2.0:8443",
Token: "dummy-token",
CACertHashes: []string{"sha512:dummy-hash"},
},
wantErr: false,
},
"incorrect join command returns error": {
joinCommand: "some string",
wantErr: true,
},
"missing api server endpoint is checked": {
joinCommand: "kubeadm join --token dummy-token --discovery-token-ca-cert-hash sha512:dummy-hash --control-plane",
wantErr: true,
},
"missing token is checked": {
joinCommand: "kubeadm join 192.0.2.0:8443 --discovery-token-ca-cert-hash sha512:dummy-hash --control-plane",
wantErr: true,
},
"missing discovery-token-ca-cert-hash is checked": {
joinCommand: "kubeadm join 192.0.2.0:8443 --token dummy-token --control-plane",
wantErr: true,
},
"missing control-plane": {
joinCommand: "kubeadm join 192.0.2.0:8443 --token dummy-token --discovery-token-ca-cert-hash sha512:dummy-hash",
wantJoinArgs: kubeadm.BootstrapTokenDiscovery{
APIServerEndpoint: "192.0.2.0:8443",
Token: "dummy-token",
CACertHashes: []string{"sha512:dummy-hash"},
},
wantErr: false,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
joinArgs, err := ParseJoinCommand(tc.joinCommand)
if tc.wantErr {
assert.Error(err)
return
}
require.NoError(err)
assert.Equal(&tc.wantJoinArgs, joinArgs)
})
}
}


@@ -0,0 +1,289 @@
package k8sapi
import (
"path/filepath"
"github.com/edgelesssys/constellation/bootstrapper/internal/kubernetes/k8sapi/resources"
"github.com/edgelesssys/constellation/internal/constants"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubeletconf "k8s.io/kubelet/config/v1beta1"
kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
)
// Uses types defined here: https://kubernetes.io/docs/reference/config-api/kubeadm-config.v1beta3/
// Slimmed down to the fields we require
const (
bindPort = 6443
auditLogDir = "/var/log/kubernetes/audit/"
auditLogFile = "audit.log"
auditPolicyPath = "/etc/kubernetes/audit-policy.yaml"
)
type CoreOSConfiguration struct{}
func (c *CoreOSConfiguration) InitConfiguration(externalCloudProvider bool) KubeadmInitYAML {
var cloudProvider string
if externalCloudProvider {
cloudProvider = "external"
}
return KubeadmInitYAML{
InitConfiguration: kubeadm.InitConfiguration{
TypeMeta: metav1.TypeMeta{
APIVersion: kubeadm.SchemeGroupVersion.String(),
Kind: "InitConfiguration",
},
NodeRegistration: kubeadm.NodeRegistrationOptions{
CRISocket: "/run/containerd/containerd.sock",
KubeletExtraArgs: map[string]string{
"cloud-provider": cloudProvider,
"network-plugin": "cni",
},
},
// AdvertiseAddress will be overwritten later
LocalAPIEndpoint: kubeadm.APIEndpoint{
BindPort: bindPort,
},
},
ClusterConfiguration: kubeadm.ClusterConfiguration{
TypeMeta: metav1.TypeMeta{
Kind: "ClusterConfiguration",
APIVersion: kubeadm.SchemeGroupVersion.String(),
},
KubernetesVersion: constants.KubernetesVersion,
// necessary to be able to access the kube-apiserver through localhost
APIServer: kubeadm.APIServer{
ControlPlaneComponent: kubeadm.ControlPlaneComponent{
ExtraArgs: map[string]string{
"audit-policy-file": auditPolicyPath,
"audit-log-path": filepath.Join(auditLogDir, auditLogFile), // CIS benchmark
"audit-log-maxage": "30", // CIS benchmark - Default value of Rancher
"audit-log-maxbackup": "10", // CIS benchmark - Default value of Rancher
"audit-log-maxsize": "100", // CIS benchmark - Default value of Rancher
"profiling": "false", // CIS benchmark
"tls-cipher-suites": "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256," +
"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256," +
"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384," +
"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256," +
"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256," +
"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305," +
"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA," +
"TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384", // CIS benchmark
},
ExtraVolumes: []kubeadm.HostPathMount{
{
Name: "audit-log",
HostPath: auditLogDir,
MountPath: auditLogDir,
ReadOnly: false,
PathType: corev1.HostPathDirectoryOrCreate,
},
{
Name: "audit",
HostPath: auditPolicyPath,
MountPath: auditPolicyPath,
ReadOnly: true,
PathType: corev1.HostPathFile,
},
},
},
CertSANs: []string{"127.0.0.1", "10.118.0.1"},
},
ControllerManager: kubeadm.ControlPlaneComponent{
ExtraArgs: map[string]string{
"flex-volume-plugin-dir": "/opt/libexec/kubernetes/kubelet-plugins/volume/exec/",
"cloud-provider": cloudProvider,
"configure-cloud-routes": "false",
"profiling": "false", // CIS benchmark
"terminated-pod-gc-threshold": "1000", // CIS benchmark - Default value of Rancher
},
},
Scheduler: kubeadm.ControlPlaneComponent{
ExtraArgs: map[string]string{
"profiling": "false",
},
},
},
// warning: this config is applied to every node in the cluster!
KubeletConfiguration: kubeletconf.KubeletConfiguration{
ProtectKernelDefaults: true, // CIS benchmark
TLSCipherSuites: []string{
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
"TLS_RSA_WITH_AES_256_GCM_SHA384",
"TLS_RSA_WITH_AES_128_GCM_SHA256",
}, // CIS benchmark
TypeMeta: metav1.TypeMeta{
APIVersion: kubeletconf.SchemeGroupVersion.String(),
Kind: "KubeletConfiguration",
},
RegisterWithTaints: []corev1.Taint{
{
Key: "node.cloudprovider.kubernetes.io/uninitialized",
Value: "true",
Effect: corev1.TaintEffectPreferNoSchedule,
},
{
Key: "node.cilium.io/agent-not-ready",
Value: "true",
Effect: corev1.TaintEffectPreferNoSchedule,
},
},
},
}
}
func (c *CoreOSConfiguration) JoinConfiguration(externalCloudProvider bool) KubeadmJoinYAML {
var cloudProvider string
if externalCloudProvider {
cloudProvider = "external"
}
return KubeadmJoinYAML{
JoinConfiguration: kubeadm.JoinConfiguration{
TypeMeta: metav1.TypeMeta{
APIVersion: kubeadm.SchemeGroupVersion.String(),
Kind: "JoinConfiguration",
},
NodeRegistration: kubeadm.NodeRegistrationOptions{
CRISocket: "/run/containerd/containerd.sock",
KubeletExtraArgs: map[string]string{
"cloud-provider": cloudProvider,
},
},
Discovery: kubeadm.Discovery{
BootstrapToken: &kubeadm.BootstrapTokenDiscovery{},
},
},
KubeletConfiguration: kubeletconf.KubeletConfiguration{
TypeMeta: metav1.TypeMeta{
APIVersion: kubeletconf.SchemeGroupVersion.String(),
Kind: "KubeletConfiguration",
},
},
}
}
type KubeadmJoinYAML struct {
JoinConfiguration kubeadm.JoinConfiguration
KubeletConfiguration kubeletconf.KubeletConfiguration
}
func (k *KubeadmJoinYAML) SetNodeName(nodeName string) {
k.JoinConfiguration.NodeRegistration.Name = nodeName
}
func (k *KubeadmJoinYAML) SetApiServerEndpoint(apiServerEndpoint string) {
k.JoinConfiguration.Discovery.BootstrapToken.APIServerEndpoint = apiServerEndpoint
}
func (k *KubeadmJoinYAML) SetToken(token string) {
k.JoinConfiguration.Discovery.BootstrapToken.Token = token
}
func (k *KubeadmJoinYAML) AppendDiscoveryTokenCaCertHash(discoveryTokenCaCertHash string) {
k.JoinConfiguration.Discovery.BootstrapToken.CACertHashes = append(k.JoinConfiguration.Discovery.BootstrapToken.CACertHashes, discoveryTokenCaCertHash)
}
func (k *KubeadmJoinYAML) SetNodeIP(nodeIP string) {
if k.JoinConfiguration.NodeRegistration.KubeletExtraArgs == nil {
k.JoinConfiguration.NodeRegistration.KubeletExtraArgs = map[string]string{"node-ip": nodeIP}
} else {
k.JoinConfiguration.NodeRegistration.KubeletExtraArgs["node-ip"] = nodeIP
}
}
func (k *KubeadmJoinYAML) SetProviderID(providerID string) {
k.KubeletConfiguration.ProviderID = providerID
}
func (k *KubeadmJoinYAML) SetControlPlane(advertiseAddress string, certificateKey string) {
k.JoinConfiguration.ControlPlane = &kubeadm.JoinControlPlane{
LocalAPIEndpoint: kubeadm.APIEndpoint{
AdvertiseAddress: advertiseAddress,
BindPort: 6443,
},
CertificateKey: certificateKey,
}
}
func (k *KubeadmJoinYAML) Marshal() ([]byte, error) {
return resources.MarshalK8SResources(k)
}
func (k *KubeadmJoinYAML) Unmarshal(yamlData []byte) (KubeadmJoinYAML, error) {
var tmp KubeadmJoinYAML
return tmp, resources.UnmarshalK8SResources(yamlData, &tmp)
}
type KubeadmInitYAML struct {
InitConfiguration kubeadm.InitConfiguration
ClusterConfiguration kubeadm.ClusterConfiguration
KubeletConfiguration kubeletconf.KubeletConfiguration
}
func (k *KubeadmInitYAML) SetNodeName(nodeName string) {
k.InitConfiguration.NodeRegistration.Name = nodeName
}
// SetCertSANs sets the SANs for the certificate.
func (k *KubeadmInitYAML) SetCertSANs(certSANs []string) {
for _, certSAN := range certSANs {
if certSAN == "" {
continue
}
k.ClusterConfiguration.APIServer.CertSANs = append(k.ClusterConfiguration.APIServer.CertSANs, certSAN)
}
}
func (k *KubeadmInitYAML) SetApiServerAdvertiseAddress(apiServerAdvertiseAddress string) {
k.InitConfiguration.LocalAPIEndpoint.AdvertiseAddress = apiServerAdvertiseAddress
}
// SetControlPlaneEndpoint sets the control plane endpoint if controlPlaneEndpoint is not empty.
func (k *KubeadmInitYAML) SetControlPlaneEndpoint(controlPlaneEndpoint string) {
if controlPlaneEndpoint != "" {
k.ClusterConfiguration.ControlPlaneEndpoint = controlPlaneEndpoint
}
}
func (k *KubeadmInitYAML) SetServiceCIDR(serviceCIDR string) {
k.ClusterConfiguration.Networking.ServiceSubnet = serviceCIDR
}
func (k *KubeadmInitYAML) SetPodNetworkCIDR(podNetworkCIDR string) {
k.ClusterConfiguration.Networking.PodSubnet = podNetworkCIDR
}
func (k *KubeadmInitYAML) SetServiceDNSDomain(serviceDNSDomain string) {
k.ClusterConfiguration.Networking.DNSDomain = serviceDNSDomain
}
func (k *KubeadmInitYAML) SetNodeIP(nodeIP string) {
if k.InitConfiguration.NodeRegistration.KubeletExtraArgs == nil {
k.InitConfiguration.NodeRegistration.KubeletExtraArgs = map[string]string{"node-ip": nodeIP}
} else {
k.InitConfiguration.NodeRegistration.KubeletExtraArgs["node-ip"] = nodeIP
}
}
func (k *KubeadmInitYAML) SetProviderID(providerID string) {
if k.InitConfiguration.NodeRegistration.KubeletExtraArgs == nil {
k.InitConfiguration.NodeRegistration.KubeletExtraArgs = map[string]string{"provider-id": providerID}
} else {
k.InitConfiguration.NodeRegistration.KubeletExtraArgs["provider-id"] = providerID
}
}
func (k *KubeadmInitYAML) Marshal() ([]byte, error) {
return resources.MarshalK8SResources(k)
}
func (k *KubeadmInitYAML) Unmarshal(yamlData []byte) (KubeadmInitYAML, error) {
var tmp KubeadmInitYAML
return tmp, resources.UnmarshalK8SResources(yamlData, &tmp)
}
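
A hedged sketch of how a caller might assemble and serialize an init configuration (all values are placeholders; the test file below exercises the same setters):

// buildInitConfig is illustrative only; values are placeholders.
func buildInitConfig() ([]byte, error) {
	cfg := (&CoreOSConfiguration{}).InitConfiguration(true)
	cfg.SetNodeName("control-plane-0")
	cfg.SetNodeIP("192.0.2.1")
	cfg.SetProviderID("somecloud://instance-0")
	cfg.SetControlPlaneEndpoint("192.0.2.100:6443")
	return cfg.Marshal()
}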


@@ -0,0 +1,89 @@
package k8sapi
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/goleak"
)
func TestMain(m *testing.M) {
goleak.VerifyTestMain(m)
}
func TestInitConfiguration(t *testing.T) {
coreOSConfig := CoreOSConfiguration{}
testCases := map[string]struct {
config KubeadmInitYAML
}{
"CoreOS init config can be created": {
config: coreOSConfig.InitConfiguration(true),
},
"CoreOS init config with all fields can be created": {
config: func() KubeadmInitYAML {
c := coreOSConfig.InitConfiguration(true)
c.SetApiServerAdvertiseAddress("192.0.2.0")
c.SetNodeIP("192.0.2.0")
c.SetNodeName("node")
c.SetPodNetworkCIDR("10.244.0.0/16")
c.SetServiceCIDR("10.245.0.0/24")
c.SetProviderID("somecloudprovider://instance-id")
return c
}(),
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
config, err := tc.config.Marshal()
require.NoError(err)
tmp, err := tc.config.Unmarshal(config)
require.NoError(err)
// test correct marshalling and unmarshalling
assert.Equal(tc.config.ClusterConfiguration, tmp.ClusterConfiguration)
assert.Equal(tc.config.InitConfiguration, tmp.InitConfiguration)
})
}
}
func TestJoinConfiguration(t *testing.T) {
coreOSConfig := CoreOSConfiguration{}
testCases := map[string]struct {
config KubeadmJoinYAML
}{
"CoreOS join config can be created": {
config: coreOSConfig.JoinConfiguration(true),
},
"CoreOS join config with all fields can be created": {
config: func() KubeadmJoinYAML {
c := coreOSConfig.JoinConfiguration(true)
c.SetApiServerEndpoint("192.0.2.0:6443")
c.SetNodeIP("192.0.2.0")
c.SetNodeName("node")
c.SetToken("token")
c.AppendDiscoveryTokenCaCertHash("discovery-token-ca-cert-hash")
c.SetProviderID("somecloudprovider://instance-id")
c.SetControlPlane("192.0.2.0", "11111111111111111111111111111111111")
return c
}(),
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
config, err := tc.config.Marshal()
require.NoError(err)
tmp, err := tc.config.Unmarshal(config)
require.NoError(err)
// test correct marshalling and unmarshalling
assert.Equal(tc.config.JoinConfiguration, tmp.JoinConfiguration)
})
}
}


@@ -0,0 +1,90 @@
package client
import (
"bytes"
"fmt"
"github.com/edgelesssys/constellation/bootstrapper/internal/kubernetes/k8sapi/resources"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/cli-runtime/pkg/resource"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
)
const fieldManager = "constellation-bootstrapper"
// Client implements k8sapi.Client interface and talks to the Kubernetes API.
type Client struct {
clientset kubernetes.Interface
builder *resource.Builder
}
// New creates a new Client, talking to the real k8s API.
func New(config []byte) (*Client, error) {
clientConfig, err := clientcmd.RESTConfigFromKubeConfig(config)
if err != nil {
return nil, fmt.Errorf("creating k8s client config from kubeconfig: %w", err)
}
clientset, err := kubernetes.NewForConfig(clientConfig)
if err != nil {
return nil, fmt.Errorf("creating k8s client from kubeconfig: %w", err)
}
restClientGetter, err := newRESTClientGetter(config)
if err != nil {
return nil, fmt.Errorf("creating k8s RESTClientGetter from kubeconfig: %w", err)
}
builder := resource.NewBuilder(restClientGetter).Unstructured()
return &Client{clientset: clientset, builder: builder}, nil
}
// ApplyOneObject uses server-side apply to send unstructured JSON blobs to the server and let it handle the core logic.
func (c *Client) ApplyOneObject(info *resource.Info, forceConflicts bool) error {
// helper can be used to patch k8s resources using server-side-apply.
helper := resource.NewHelper(info.Client, info.Mapping).
WithFieldManager(fieldManager)
// server-side-apply uses unstructured JSON instead of strict typing on the client side.
data, err := runtime.Encode(unstructured.UnstructuredJSONScheme, info.Object)
if err != nil {
return fmt.Errorf("preparing resource for server-side apply: encoding of resource: %w", err)
}
options := metav1.PatchOptions{
Force: &forceConflicts,
}
obj, err := helper.Patch(
info.Namespace,
info.Name,
types.ApplyPatchType,
data,
&options,
)
if err != nil {
return fmt.Errorf("applying object %v using server-side apply: %w", info, err)
}
return info.Refresh(obj, true)
}
// GetObjects tries to marshal the resources into []*resource.Info using a resource.Builder.
func (c *Client) GetObjects(resources resources.Marshaler) ([]*resource.Info, error) {
// convert our resource struct into YAML
data, err := resources.Marshal()
if err != nil {
return nil, fmt.Errorf("converting resources to YAML: %w", err)
}
// read into resource.Info using builder
reader := bytes.NewReader(data)
result := c.builder.
ContinueOnError().
NamespaceParam("default").
DefaultNamespace().
Stream(reader, "yaml").
Flatten().
Do()
return result.Infos()
}
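
GetObjects and ApplyOneObject are designed to be chained: build the infos once, then apply them one by one. A minimal sketch (the kubeconfig bytes and the Marshaler value are assumed inputs):

// applyAll is illustrative only.
func applyAll(kubeconfig []byte, res resources.Marshaler) error {
	c, err := New(kubeconfig)
	if err != nil {
		return err
	}
	infos, err := c.GetObjects(res)
	if err != nil {
		return err
	}
	for _, info := range infos {
		if err := c.ApplyOneObject(info, true); err != nil {
			return err
		}
	}
	return nil
}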


@@ -0,0 +1,280 @@
package client
import (
"bytes"
"errors"
"io"
"net/http"
"testing"
"github.com/edgelesssys/constellation/bootstrapper/internal/kubernetes/k8sapi/resources"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/goleak"
"google.golang.org/protobuf/proto"
apps "k8s.io/api/apps/v1"
k8s "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/api/meta/testrestmapper"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer/json"
"k8s.io/cli-runtime/pkg/resource"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/kubernetes/scheme"
restfake "k8s.io/client-go/rest/fake"
"k8s.io/client-go/restmapper"
)
func TestMain(m *testing.M) {
goleak.VerifyTestMain(m)
}
var (
corev1GV = schema.GroupVersion{Version: "v1"}
nginxDeployment = &apps.Deployment{
TypeMeta: v1.TypeMeta{
APIVersion: "apps/v1",
Kind: "Deployment",
},
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"app": "nginx",
},
Name: "my-nginx",
},
Spec: apps.DeploymentSpec{
Replicas: proto.Int32(3),
Selector: &v1.LabelSelector{
MatchLabels: map[string]string{
"app": "nginx",
},
},
Template: k8s.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"app": "nginx",
},
},
Spec: k8s.PodSpec{
Containers: []k8s.Container{
{
Name: "nginx",
Image: "nginx:1.14.2",
Ports: []k8s.ContainerPort{
{
ContainerPort: 80,
},
},
},
},
},
},
},
}
nginxDeplJSON, _ = marshalJSON(nginxDeployment)
nginxDeplYAML, _ = marshalYAML(nginxDeployment)
)
type unmarshableResource struct{}
func (*unmarshableResource) Marshal() ([]byte, error) {
return nil, errors.New("someErr")
}
func stringBody(body string) io.ReadCloser {
return io.NopCloser(bytes.NewReader([]byte(body)))
}
func fakeClientWith(t *testing.T, testName string, data map[string]string) resource.FakeClientFunc {
return func(version schema.GroupVersion) (resource.RESTClient, error) {
return &restfake.RESTClient{
GroupVersion: corev1GV,
NegotiatedSerializer: scheme.Codecs.WithoutConversion(),
Client: restfake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
p := req.URL.Path
q := req.URL.RawQuery
if len(q) != 0 {
p = p + "?" + q
}
body, ok := data[p]
if !ok {
t.Fatalf("%s: unexpected request: %s (%s)\n%#v", testName, p, req.URL, req)
}
header := http.Header{}
header.Set("Content-Type", runtime.ContentTypeJSON)
return &http.Response{
StatusCode: http.StatusOK,
Header: header,
Body: stringBody(body),
}, nil
}),
}, nil
}
}
func newClientWithFakes(t *testing.T, data map[string]string, objects ...runtime.Object) Client {
clientset := fake.NewSimpleClientset(objects...)
builder := resource.NewFakeBuilder(
fakeClientWith(t, "", data),
func() (meta.RESTMapper, error) {
return testrestmapper.TestOnlyStaticRESTMapper(scheme.Scheme), nil
},
func() (restmapper.CategoryExpander, error) {
return resource.FakeCategoryExpander, nil
}).
Unstructured()
client := Client{
clientset: clientset,
builder: builder,
}
return client
}
func failingClient() resource.FakeClientFunc {
return func(version schema.GroupVersion) (resource.RESTClient, error) {
return &restfake.RESTClient{
GroupVersion: corev1GV,
NegotiatedSerializer: scheme.Codecs.WithoutConversion(),
Resp: &http.Response{StatusCode: 501},
}, nil
}
}
func newFailingClient(objects ...runtime.Object) Client {
clientset := fake.NewSimpleClientset(objects...)
builder := resource.NewFakeBuilder(
failingClient(),
func() (meta.RESTMapper, error) {
return testrestmapper.TestOnlyStaticRESTMapper(scheme.Scheme), nil
},
func() (restmapper.CategoryExpander, error) {
return resource.FakeCategoryExpander, nil
}).
WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...)
client := Client{
clientset: clientset,
builder: builder,
}
return client
}
func marshalJSON(obj runtime.Object) ([]byte, error) {
serializer := json.NewSerializer(json.DefaultMetaFactory, nil, nil, false)
var buf bytes.Buffer
if err := serializer.Encode(obj, &buf); err != nil {
return nil, err
}
return buf.Bytes(), nil
}
func marshalYAML(obj runtime.Object) ([]byte, error) {
serializer := json.NewYAMLSerializer(json.DefaultMetaFactory, nil, nil)
var buf bytes.Buffer
if err := serializer.Encode(obj, &buf); err != nil {
return nil, err
}
return buf.Bytes(), nil
}
func TestApplyOneObject(t *testing.T) {
testCases := map[string]struct {
httpResponseData map[string]string
wantObj runtime.Object
resourcesYAML string
failingClient bool
wantErr bool
}{
"apply works": {
httpResponseData: map[string]string{
"/deployments/my-nginx?fieldManager=constellation-bootstrapper&force=true": string(nginxDeplJSON),
},
wantObj: nginxDeployment,
resourcesYAML: string(nginxDeplYAML),
wantErr: false,
},
"apply fails": {
httpResponseData: map[string]string{},
wantObj: nginxDeployment,
resourcesYAML: string(nginxDeplYAML),
failingClient: true,
wantErr: true,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
var client Client
if tc.failingClient {
client = newFailingClient(tc.wantObj)
} else {
client = newClientWithFakes(t, tc.httpResponseData, tc.wantObj)
}
reader := bytes.NewReader([]byte(tc.resourcesYAML))
res := client.builder.
ContinueOnError().
Stream(reader, "yaml").
Flatten().
Do()
assert.NoError(res.Err())
infos, err := res.Infos()
assert.NoError(err)
require.Len(infos, 1)
err = client.ApplyOneObject(infos[0], true)
if tc.wantErr {
assert.Error(err)
return
}
require.NoError(err)
})
}
}
func TestGetObjects(t *testing.T) {
testCases := map[string]struct {
wantResources resources.Marshaler
httpResponseData map[string]string
resourcesYAML string
wantErr bool
}{
"GetObjects works on cluster-autoscaler deployment": {
wantResources: resources.NewDefaultAutoscalerDeployment(nil, nil, nil),
resourcesYAML: string(nginxDeplYAML),
wantErr: false,
},
"GetObjects works on cloud-controller-manager deployment": {
wantResources: resources.NewDefaultCloudControllerManagerDeployment("someProvider", "someImage", "somePath", "someCIDR", nil, nil, nil, nil),
resourcesYAML: string(nginxDeplYAML),
wantErr: false,
},
"GetObjects Marshal failure detected": {
wantResources: &unmarshableResource{},
resourcesYAML: string(nginxDeplYAML),
wantErr: true,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
client := newClientWithFakes(t, tc.httpResponseData)
infos, err := client.GetObjects(tc.wantResources)
if tc.wantErr {
assert.Error(err)
return
}
require.NoError(err)
assert.NotNil(infos)
})
}
}


@@ -0,0 +1,64 @@
package client
import (
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/client-go/discovery"
"k8s.io/client-go/discovery/cached/memory"
"k8s.io/client-go/rest"
"k8s.io/client-go/restmapper"
"k8s.io/client-go/tools/clientcmd"
)
// restClientGetter implements k8s.io/cli-runtime/pkg/resource.RESTClientGetter.
type restClientGetter struct {
clientconfig clientcmd.ClientConfig
}
// newRESTClientGetter creates a new restClientGetter using a kubeconfig.
func newRESTClientGetter(kubeconfig []byte) (*restClientGetter, error) {
clientconfig, err := clientcmd.NewClientConfigFromBytes(kubeconfig)
if err != nil {
return nil, err
}
rawconfig, err := clientconfig.RawConfig()
if err != nil {
return nil, err
}
clientconfig = clientcmd.NewDefaultClientConfig(rawconfig, &clientcmd.ConfigOverrides{})
return &restClientGetter{clientconfig}, nil
}
// ToRESTConfig returns k8s REST client config.
func (r *restClientGetter) ToRESTConfig() (*rest.Config, error) {
return r.clientconfig.ClientConfig()
}
// ToDiscoveryClient creates new k8s discovery client from restClientGetter.
func (r *restClientGetter) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) {
restconfig, err := r.clientconfig.ClientConfig()
if err != nil {
return nil, err
}
dc, err := discovery.NewDiscoveryClientForConfig(restconfig)
if err != nil {
return nil, err
}
return memory.NewMemCacheClient(dc), nil
}
// ToRESTMapper creates new k8s RESTMapper from restClientGetter.
func (r *restClientGetter) ToRESTMapper() (meta.RESTMapper, error) {
dc, err := r.ToDiscoveryClient()
if err != nil {
return nil, err
}
return restmapper.NewDeferredDiscoveryRESTMapper(dc), nil
}
// ToRawKubeConfigLoader returns the inner k8s ClientConfig.
func (r *restClientGetter) ToRawKubeConfigLoader() clientcmd.ClientConfig {
return r.clientconfig
}
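
The getter exists so the package can feed a kubeconfig into k8s.io/cli-runtime, which expects a RESTClientGetter. A sketch of the wiring, mirroring what client.New above does (needs the k8s.io/cli-runtime/pkg/resource import):

// builderFromKubeconfig is illustrative only.
func builderFromKubeconfig(kubeconfig []byte) (*resource.Builder, error) {
	getter, err := newRESTClientGetter(kubeconfig)
	if err != nil {
		return nil, err
	}
	return resource.NewBuilder(getter).Unstructured(), nil
}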


@@ -0,0 +1,137 @@
package client
import (
"errors"
"testing"
"github.com/stretchr/testify/require"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)
const testingKubeconfig = `
apiVersion: v1
clusters:
- cluster:
certificate-authority-data: ""
server: https://192.0.2.0:6443
name: kubernetes
contexts:
- context:
cluster: kubernetes
user: kubernetes-admin
name: kubernetes-admin@kubernetes
current-context: kubernetes-admin@kubernetes
kind: Config
preferences: {}
users:
- name: kubernetes-admin
user:
client-certificate-data: ""
client-key-data: ""
`
type stubClientConfig struct {
RawConfigConfig clientcmdapi.Config
RawConfigErr error
ClientConfigConfig *restclient.Config
ClientConfigErr error
NamespaceString string
NamespaceOverridden bool
NamespaceErr error
ConfigAccessResult clientcmd.ConfigAccess
}
func (s *stubClientConfig) RawConfig() (clientcmdapi.Config, error) {
return s.RawConfigConfig, s.RawConfigErr
}
func (s *stubClientConfig) ClientConfig() (*restclient.Config, error) {
return s.ClientConfigConfig, s.ClientConfigErr
}
func (s *stubClientConfig) Namespace() (string, bool, error) {
return s.NamespaceString, s.NamespaceOverridden, s.NamespaceErr
}
func (s *stubClientConfig) ConfigAccess() clientcmd.ConfigAccess {
return s.ConfigAccessResult
}
func TestNewRESTClientGetter(t *testing.T) {
require := require.New(t)
result, err := newRESTClientGetter([]byte(testingKubeconfig))
require.NoError(err)
require.NotNil(result)
}
func TestToRESTConfig(t *testing.T) {
require := require.New(t)
getter := restClientGetter{
clientconfig: &stubClientConfig{
ClientConfigConfig: &restclient.Config{},
},
}
result, err := getter.ToRESTConfig()
require.NoError(err)
require.NotNil(result)
}
func TestToDiscoveryClient(t *testing.T) {
require := require.New(t)
getter := restClientGetter{
clientconfig: &stubClientConfig{
ClientConfigConfig: &restclient.Config{},
},
}
result, err := getter.ToDiscoveryClient()
require.NoError(err)
require.NotNil(result)
}
func TestToDiscoveryClientFail(t *testing.T) {
require := require.New(t)
getter := restClientGetter{
clientconfig: &stubClientConfig{
ClientConfigErr: errors.New("someErr"),
},
}
_, err := getter.ToDiscoveryClient()
require.Error(err)
}
func TestToRESTMapper(t *testing.T) {
require := require.New(t)
getter := restClientGetter{
clientconfig: &stubClientConfig{
ClientConfigConfig: &restclient.Config{},
},
}
result, err := getter.ToRESTMapper()
require.NoError(err)
require.NotNil(result)
}
func TestToRESTMapperFail(t *testing.T) {
require := require.New(t)
getter := restClientGetter{
clientconfig: &stubClientConfig{
ClientConfigErr: errors.New("someErr"),
},
}
_, err := getter.ToRESTMapper()
require.Error(err)
}
func TestToRawKubeConfigLoader(t *testing.T) {
clientConfig := stubClientConfig{
ClientConfigConfig: &restclient.Config{},
}
require := require.New(t)
getter := restClientGetter{
clientconfig: &clientConfig,
}
result := getter.ToRawKubeConfigLoader()
require.Equal(&clientConfig, result)
}

View file

@ -0,0 +1,11 @@
package kubectl
import "github.com/edgelesssys/constellation/bootstrapper/internal/kubernetes/k8sapi/kubectl/client"
// generator implements the clientGenerator interface.
type generator struct{}
// NewClient generates a new client implementing the Client interface.
func (generator) NewClient(kubeconfig []byte) (Client, error) {
return client.New(kubeconfig)
}

View file

@ -0,0 +1,68 @@
package kubectl
import (
"errors"
"fmt"
"github.com/edgelesssys/constellation/bootstrapper/internal/kubernetes/k8sapi/resources"
"k8s.io/cli-runtime/pkg/resource"
)
// ErrKubeconfigNotSet is the error value returned by Kubectl.Apply when SetKubeconfig was not called first.
var ErrKubeconfigNotSet = errors.New("kubeconfig not set")
// Client wraps marshalable k8s resources into resource.Info fields and applies them in a cluster.
type Client interface {
// ApplyOneObject applies a k8s resource similar to kubectl apply.
ApplyOneObject(info *resource.Info, forceConflicts bool) error
// GetObjects converts resources into prepared info fields for use in ApplyOneObject.
GetObjects(resources resources.Marshaler) ([]*resource.Info, error)
}
// clientGenerator can generate new clients from a kubeconfig.
type clientGenerator interface {
NewClient(kubeconfig []byte) (Client, error)
}
// Kubectl implements kubernetes.Apply interface and acts like the Kubernetes "kubectl" tool.
type Kubectl struct {
clientGenerator
kubeconfig []byte
}
// New creates a new Kubectl using the real clientGenerator.
func New() *Kubectl {
return &Kubectl{
clientGenerator: &generator{},
}
}
// Apply applies the given resources using server-side apply.
func (k *Kubectl) Apply(resources resources.Marshaler, forceConflicts bool) error {
if k.kubeconfig == nil {
return ErrKubeconfigNotSet
}
client, err := k.clientGenerator.NewClient(k.kubeconfig)
if err != nil {
return err
}
// convert the Marshaler object into []*resource.Info
infos, err := client.GetObjects(resources)
if err != nil {
return err
}
// apply each object, one by one
for i, resource := range infos {
if err := client.ApplyOneObject(resource, forceConflicts); err != nil {
return fmt.Errorf("kubectl apply of object %v/%v: %w", i, len(infos), err)
}
}
return nil
}
// SetKubeconfig stores the kubeconfig used to generate Clients with the clientGenerator later.
func (k *Kubectl) SetKubeconfig(kubeconfig []byte) {
k.kubeconfig = kubeconfig
}
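A minimal usage sketch (hypothetical helper, not part of this commit), assuming the kubeconfig was obtained during cluster bootstrap:
// exampleApply shows the intended call order: set the kubeconfig first, then
// apply a marshalable resource set with server-side apply and forced
// conflict resolution.
func exampleApply(kubeconfig []byte, deployment resources.Marshaler) error {
	kube := New()
	kube.SetKubeconfig(kubeconfig)
	return kube.Apply(deployment, true)
}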

View file

@ -0,0 +1,113 @@
package kubectl
import (
"errors"
"testing"
"github.com/edgelesssys/constellation/bootstrapper/internal/kubernetes/k8sapi/resources"
"github.com/stretchr/testify/assert"
"go.uber.org/goleak"
"k8s.io/cli-runtime/pkg/resource"
)
func TestMain(m *testing.M) {
goleak.VerifyTestMain(m)
}
type stubClient struct {
applyOneObjectErr error
getObjectsInfos []*resource.Info
getObjectsErr error
}
func (s *stubClient) ApplyOneObject(info *resource.Info, forceConflicts bool) error {
return s.applyOneObjectErr
}
func (s *stubClient) GetObjects(resources resources.Marshaler) ([]*resource.Info, error) {
return s.getObjectsInfos, s.getObjectsErr
}
type stubClientGenerator struct {
applyOneObjectErr error
getObjectsInfos []*resource.Info
getObjectsErr error
newClientErr error
}
func (s *stubClientGenerator) NewClient(kubeconfig []byte) (Client, error) {
return &stubClient{
s.applyOneObjectErr,
s.getObjectsInfos,
s.getObjectsErr,
}, s.newClientErr
}
type dummyResource struct{}
func (*dummyResource) Marshal() ([]byte, error) {
panic("dummy")
}
func TestApplyWorks(t *testing.T) {
assert := assert.New(t)
kube := Kubectl{
clientGenerator: &stubClientGenerator{
getObjectsInfos: []*resource.Info{
{},
},
},
}
kube.SetKubeconfig([]byte("someConfig"))
assert.NoError(kube.Apply(&dummyResource{}, true))
}
func TestKubeconfigUnset(t *testing.T) {
assert := assert.New(t)
kube := Kubectl{}
assert.ErrorIs(kube.Apply(&dummyResource{}, true), ErrKubeconfigNotSet)
}
func TestClientGeneratorFails(t *testing.T) {
assert := assert.New(t)
err := errors.New("generator failed")
kube := Kubectl{
clientGenerator: &stubClientGenerator{
newClientErr: err,
},
}
kube.SetKubeconfig([]byte("someConfig"))
assert.ErrorIs(kube.Apply(&dummyResource{}, true), err)
}
func TestGetObjectsFails(t *testing.T) {
assert := assert.New(t)
err := errors.New("getObjects failed")
kube := Kubectl{
clientGenerator: &stubClientGenerator{
getObjectsErr: err,
},
}
kube.SetKubeconfig([]byte("someConfig"))
assert.ErrorIs(kube.Apply(&dummyResource{}, true), err)
}
func TestApplyOneObjectFails(t *testing.T) {
assert := assert.New(t)
err := errors.New("applyOneObject failed")
kube := Kubectl{
clientGenerator: &stubClientGenerator{
getObjectsInfos: []*resource.Info{
{},
},
applyOneObjectErr: err,
},
}
kube.SetKubeconfig([]byte("someConfig"))
assert.ErrorIs(kube.Apply(&dummyResource{}, true), err)
}

View file

@ -0,0 +1,201 @@
package resources
import (
"github.com/edgelesssys/constellation/internal/secrets"
"google.golang.org/protobuf/proto"
apps "k8s.io/api/apps/v1"
k8s "k8s.io/api/core/v1"
rbac "k8s.io/api/rbac/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// accessManagerDeployment holds the configuration for the SSH user creation pods. User/Key definitions are stored in the ConfigMap, and the manager is deployed on each node by the DaemonSet.
type accessManagerDeployment struct {
ConfigMap k8s.ConfigMap
ServiceAccount k8s.ServiceAccount
Role rbac.Role
RoleBinding rbac.RoleBinding
DaemonSet apps.DaemonSet
ImagePullSecret k8s.Secret
}
// NewAccessManagerDeployment creates a new *accessManagerDeployment which manages the SSH users for the cluster.
func NewAccessManagerDeployment(sshUsers map[string]string) *accessManagerDeployment {
return &accessManagerDeployment{
ServiceAccount: k8s.ServiceAccount{
TypeMeta: v1.TypeMeta{
APIVersion: "v1",
Kind: "ServiceAccount",
},
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "constellation-access-manager",
"app.kubernetes.io/managed-by": "Constellation",
},
Name: "constellation-access-manager",
Namespace: "kube-system",
},
AutomountServiceAccountToken: proto.Bool(true),
},
ConfigMap: k8s.ConfigMap{
TypeMeta: v1.TypeMeta{
APIVersion: "v1",
Kind: "ConfigMap",
},
ObjectMeta: v1.ObjectMeta{
Name: "ssh-users",
Namespace: "kube-system",
},
Data: sshUsers,
},
DaemonSet: apps.DaemonSet{
TypeMeta: v1.TypeMeta{
APIVersion: "apps/v1",
Kind: "DaemonSet",
},
ObjectMeta: v1.ObjectMeta{
Name: "constellation-access-manager",
Namespace: "kube-system",
Labels: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "constellation-access-manager",
},
},
Spec: apps.DaemonSetSpec{
Selector: &v1.LabelSelector{
MatchLabels: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "constellation-access-manager",
},
},
Template: k8s.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "constellation-access-manager",
},
},
Spec: k8s.PodSpec{
Tolerations: []k8s.Toleration{
{
Key: "node-role.kubernetes.io/master",
Operator: k8s.TolerationOpExists,
Effect: k8s.TaintEffectNoSchedule,
},
{
Key: "node-role.kubernetes.io/control-plane",
Operator: k8s.TolerationOpExists,
Effect: k8s.TaintEffectNoSchedule,
},
},
ImagePullSecrets: []k8s.LocalObjectReference{
{
Name: secrets.PullSecretName,
},
},
Containers: []k8s.Container{
{
Name: "pause",
Image: "gcr.io/google_containers/pause",
ImagePullPolicy: k8s.PullIfNotPresent,
},
},
InitContainers: []k8s.Container{
{
Name: "constellation-access-manager",
Image: accessManagerImage,
VolumeMounts: []k8s.VolumeMount{
{
Name: "host",
MountPath: "/host",
},
},
SecurityContext: &k8s.SecurityContext{
Capabilities: &k8s.Capabilities{
Add: []k8s.Capability{
"SYS_CHROOT",
},
},
},
},
},
ServiceAccountName: "constellation-access-manager",
Volumes: []k8s.Volume{
{
Name: "host",
VolumeSource: k8s.VolumeSource{
HostPath: &k8s.HostPathVolumeSource{
Path: "/",
},
},
},
},
},
},
},
},
Role: rbac.Role{
TypeMeta: v1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "Role",
},
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "constellation-access-manager",
"app.kubernetes.io/managed-by": "Constellation",
},
Name: "constellation-access-manager",
Namespace: "kube-system",
},
Rules: []rbac.PolicyRule{
{
APIGroups: []string{""},
Resources: []string{
"configmaps",
},
ResourceNames: []string{
"ssh-users",
},
Verbs: []string{
"get",
},
},
},
},
RoleBinding: rbac.RoleBinding{
TypeMeta: v1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "RoleBinding",
},
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "constellation-access-manager",
"app.kubernetes.io/managed-by": "Constellation",
},
Name: "constellation-access-manager",
Namespace: "kube-system",
},
RoleRef: rbac.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "Role",
Name: "constellation-access-manager",
},
Subjects: []rbac.Subject{
{
Kind: "ServiceAccount",
Name: "constellation-access-manager",
Namespace: "kube-system",
},
},
},
ImagePullSecret: NewImagePullSecret(),
}
}
// Marshal marshals the access-manager deployment as YAML documents.
func (c *accessManagerDeployment) Marshal() ([]byte, error) {
return MarshalK8SResources(c)
}
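A minimal usage sketch (hypothetical user name and key, not part of this commit):
// exampleAccessManagerYAML builds the access-manager resources for a single
// SSH user and marshals them to YAML.
func exampleAccessManagerYAML() ([]byte, error) {
	sshUsers := map[string]string{
		"demo-user": "ssh-ed25519 AAAA... demo-user@example", // hypothetical key
	}
	return NewAccessManagerDeployment(sshUsers).Marshal()
}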

View file

@ -0,0 +1,37 @@
package resources
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/goleak"
)
func TestMain(m *testing.M) {
goleak.VerifyTestMain(m)
}
func TestAccessManagerMarshalUnmarshal(t *testing.T) {
require := require.New(t)
assert := assert.New(t)
// Without data
accessManagerDeplNil := NewAccessManagerDeployment(nil)
data, err := accessManagerDeplNil.Marshal()
require.NoError(err)
var recreated accessManagerDeployment
require.NoError(UnmarshalK8SResources(data, &recreated))
assert.Equal(accessManagerDeplNil, &recreated)
// With data
sshUsers := make(map[string]string)
sshUsers["test-user"] = "ssh-rsa abcdefg"
accessManagerDepl := NewAccessManagerDeployment(sshUsers)
data, err = accessManagerDepl.Marshal()
require.NoError(err)
require.NoError(UnmarshalK8SResources(data, &recreated))
assert.Equal(accessManagerDepl, &recreated)
}

View file

@ -0,0 +1,245 @@
package resources
import (
"fmt"
"github.com/edgelesssys/constellation/internal/constants"
"github.com/edgelesssys/constellation/internal/secrets"
apps "k8s.io/api/apps/v1"
k8s "k8s.io/api/core/v1"
rbac "k8s.io/api/rbac/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
type activationDaemonset struct {
ClusterRole rbac.ClusterRole
ClusterRoleBinding rbac.ClusterRoleBinding
ConfigMap k8s.ConfigMap
DaemonSet apps.DaemonSet
ServiceAccount k8s.ServiceAccount
Service k8s.Service
}
// NewActivationDaemonset returns a daemonset for the activation service.
func NewActivationDaemonset(csp, measurementsJSON, idJSON string) *activationDaemonset {
return &activationDaemonset{
ClusterRole: rbac.ClusterRole{
TypeMeta: meta.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRole",
},
ObjectMeta: meta.ObjectMeta{
Name: "activation-service",
Labels: map[string]string{
"k8s-app": "activation-service",
},
},
Rules: []rbac.PolicyRule{
{
APIGroups: []string{""},
Resources: []string{"secrets"},
Verbs: []string{"get", "list", "create", "update"},
},
{
APIGroups: []string{"rbac.authorization.k8s.io"},
Resources: []string{"roles", "rolebindings"},
Verbs: []string{"create", "update"},
},
},
},
ClusterRoleBinding: rbac.ClusterRoleBinding{
TypeMeta: meta.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRoleBinding",
},
ObjectMeta: meta.ObjectMeta{
Name: "activation-service",
},
RoleRef: rbac.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: "activation-service",
},
Subjects: []rbac.Subject{
{
Kind: "ServiceAccount",
Name: "activation-service",
Namespace: "kube-system",
},
},
},
DaemonSet: apps.DaemonSet{
TypeMeta: meta.TypeMeta{
APIVersion: "apps/v1",
Kind: "DaemonSet",
},
ObjectMeta: meta.ObjectMeta{
Name: "activation-service",
Namespace: "kube-system",
Labels: map[string]string{
"k8s-app": "activation-service",
"component": "activation-service",
"kubernetes.io/cluster-service": "true",
},
},
Spec: apps.DaemonSetSpec{
Selector: &meta.LabelSelector{
MatchLabels: map[string]string{
"k8s-app": "activation-service",
},
},
Template: k8s.PodTemplateSpec{
ObjectMeta: meta.ObjectMeta{
Labels: map[string]string{
"k8s-app": "activation-service",
},
},
Spec: k8s.PodSpec{
PriorityClassName: "system-cluster-critical",
ServiceAccountName: "activation-service",
Tolerations: []k8s.Toleration{
{
Key: "CriticalAddonsOnly",
Operator: k8s.TolerationOpExists,
},
{
Key: "node-role.kubernetes.io/master",
Operator: k8s.TolerationOpEqual,
Value: "true",
Effect: k8s.TaintEffectNoSchedule,
},
{
Key: "node-role.kubernetes.io/control-plane",
Operator: k8s.TolerationOpExists,
Effect: k8s.TaintEffectNoSchedule,
},
{
Operator: k8s.TolerationOpExists,
Effect: k8s.TaintEffectNoExecute,
},
{
Operator: k8s.TolerationOpExists,
Effect: k8s.TaintEffectNoSchedule,
},
},
// Only run on control plane nodes
NodeSelector: map[string]string{
"node-role.kubernetes.io/master": "",
},
ImagePullSecrets: []k8s.LocalObjectReference{
{
Name: secrets.PullSecretName,
},
},
Containers: []k8s.Container{
{
Name: "activation-service",
Image: activationImage,
Ports: []k8s.ContainerPort{
{
ContainerPort: constants.ActivationServicePort,
Name: "tcp",
},
},
SecurityContext: &k8s.SecurityContext{
Privileged: func(b bool) *bool { return &b }(true),
},
Args: []string{
fmt.Sprintf("--cloud-provider=%s", csp),
fmt.Sprintf("--kms-endpoint=kms.kube-system:%d", constants.KMSPort),
},
VolumeMounts: []k8s.VolumeMount{
{
Name: "config",
ReadOnly: true,
MountPath: constants.ServiceBasePath,
},
{
Name: "kubeadm",
ReadOnly: true,
MountPath: "/etc/kubernetes",
},
},
},
},
Volumes: []k8s.Volume{
{
Name: "config",
VolumeSource: k8s.VolumeSource{
ConfigMap: &k8s.ConfigMapVolumeSource{
LocalObjectReference: k8s.LocalObjectReference{
Name: "activation-config",
},
},
},
},
{
Name: "kubeadm",
VolumeSource: k8s.VolumeSource{
HostPath: &k8s.HostPathVolumeSource{
Path: "/etc/kubernetes",
},
},
},
},
},
},
},
},
ServiceAccount: k8s.ServiceAccount{
TypeMeta: meta.TypeMeta{
APIVersion: "v1",
Kind: "ServiceAccount",
},
ObjectMeta: meta.ObjectMeta{
Name: "activation-service",
Namespace: "kube-system",
},
},
Service: k8s.Service{
TypeMeta: meta.TypeMeta{
APIVersion: "v1",
Kind: "Service",
},
ObjectMeta: meta.ObjectMeta{
Name: "activation-service",
Namespace: "kube-system",
},
Spec: k8s.ServiceSpec{
Type: k8s.ServiceTypeNodePort,
Ports: []k8s.ServicePort{
{
Name: "grpc",
Protocol: k8s.ProtocolTCP,
Port: constants.ActivationServicePort,
TargetPort: intstr.IntOrString{IntVal: constants.ActivationServicePort},
NodePort: constants.ActivationServiceNodePort,
},
},
Selector: map[string]string{
"k8s-app": "activation-service",
},
},
},
ConfigMap: k8s.ConfigMap{
TypeMeta: meta.TypeMeta{
APIVersion: "v1",
Kind: "ConfigMap",
},
ObjectMeta: meta.ObjectMeta{
Name: "activation-config",
Namespace: "kube-system",
},
Data: map[string]string{
"measurements": measurementsJSON,
"id": idJSON,
},
},
}
}
// Marshal marshals the activation daemonset as YAML documents.
func (a *activationDaemonset) Marshal() ([]byte, error) {
return MarshalK8SResources(a)
}

View file

@ -0,0 +1,18 @@
package resources
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestNewActivationDaemonset(t *testing.T) {
deployment := NewActivationDaemonset("csp", "measurementsJSON", "idJSON")
deploymentYAML, err := deployment.Marshal()
require.NoError(t, err)
var recreated activationDaemonset
require.NoError(t, UnmarshalK8SResources(deploymentYAML, &recreated))
assert.Equal(t, deployment, &recreated)
}

View file

@ -0,0 +1,33 @@
package resources
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
auditv1 "k8s.io/apiserver/pkg/apis/audit/v1"
)
// AuditPolicy defines rulesets for what should be logged in the kube-apiserver audit log.
// Reference: https://kubernetes.io/docs/tasks/debug/debug-cluster/audit/ .
type AuditPolicy struct {
Policy auditv1.Policy
}
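// NewDefaultAuditPolicy creates a new audit policy that logs request metadata for all requests.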
func NewDefaultAuditPolicy() *AuditPolicy {
return &AuditPolicy{
Policy: auditv1.Policy{
TypeMeta: v1.TypeMeta{
APIVersion: "audit.k8s.io/v1",
Kind: "Policy",
},
Rules: []auditv1.PolicyRule{
{
Level: auditv1.LevelMetadata,
},
},
},
}
}
// Marshal marshals the audit policy as a YAML document.
func (p *AuditPolicy) Marshal() ([]byte, error) {
return MarshalK8SResources(p)
}

View file

@ -0,0 +1,21 @@
package resources
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestAuditPolicyMarshalUnmarshal(t *testing.T) {
require := require.New(t)
assert := assert.New(t)
auditPolicy := NewDefaultAuditPolicy()
data, err := auditPolicy.Marshal()
require.NoError(err)
var recreated AuditPolicy
require.NoError(UnmarshalK8SResources(data, &recreated))
assert.Equal(auditPolicy, &recreated)
}

View file

@ -0,0 +1,172 @@
package resources
import (
"fmt"
apps "k8s.io/api/apps/v1"
k8s "k8s.io/api/core/v1"
rbac "k8s.io/api/rbac/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
)
type cloudControllerManagerDeployment struct {
ServiceAccount k8s.ServiceAccount
ClusterRoleBinding rbac.ClusterRoleBinding
DaemonSet apps.DaemonSet
}
// References:
// https://raw.githubusercontent.com/kubernetes/website/main/content/en/examples/admin/cloud/ccm-example.yaml
// https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/#cloud-controller-manager
// NewDefaultCloudControllerManagerDeployment creates a new *cloudControllerManagerDeployment, customized for the CSP.
func NewDefaultCloudControllerManagerDeployment(cloudProvider, image, path, podCIDR string, extraArgs []string, extraVolumes []k8s.Volume, extraVolumeMounts []k8s.VolumeMount, env []k8s.EnvVar) *cloudControllerManagerDeployment {
command := []string{
path,
fmt.Sprintf("--cloud-provider=%s", cloudProvider),
"--leader-elect=true",
fmt.Sprintf("--cluster-cidr=%s", podCIDR),
"-v=2",
}
command = append(command, extraArgs...)
volumes := []k8s.Volume{
{
Name: "etckubernetes",
VolumeSource: k8s.VolumeSource{
HostPath: &k8s.HostPathVolumeSource{Path: "/etc/kubernetes"},
},
},
{
Name: "etcssl",
VolumeSource: k8s.VolumeSource{
HostPath: &k8s.HostPathVolumeSource{Path: "/etc/ssl"},
},
},
{
Name: "etcpki",
VolumeSource: k8s.VolumeSource{
HostPath: &k8s.HostPathVolumeSource{Path: "/etc/pki"},
},
},
}
volumes = append(volumes, extraVolumes...)
volumeMounts := []k8s.VolumeMount{
{
MountPath: "/etc/kubernetes",
Name: "etckubernetes",
ReadOnly: true,
},
{
MountPath: "/etc/ssl",
Name: "etcssl",
ReadOnly: true,
},
{
MountPath: "/etc/pki",
Name: "etcpki",
ReadOnly: true,
},
}
volumeMounts = append(volumeMounts, extraVolumeMounts...)
return &cloudControllerManagerDeployment{
ServiceAccount: k8s.ServiceAccount{
TypeMeta: meta.TypeMeta{
APIVersion: "v1",
Kind: "ServiceAccount",
},
ObjectMeta: meta.ObjectMeta{
Name: "cloud-controller-manager",
Namespace: "kube-system",
},
},
ClusterRoleBinding: rbac.ClusterRoleBinding{
TypeMeta: meta.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRoleBinding",
},
ObjectMeta: meta.ObjectMeta{
Name: "system:cloud-controller-manager",
},
RoleRef: rbac.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: "cluster-admin",
},
Subjects: []rbac.Subject{
{
Kind: "ServiceAccount",
Name: "cloud-controller-manager",
Namespace: "kube-system",
},
},
},
DaemonSet: apps.DaemonSet{
TypeMeta: meta.TypeMeta{
APIVersion: "apps/v1",
Kind: "DaemonSet",
},
ObjectMeta: meta.ObjectMeta{
Labels: map[string]string{
"k8s-app": "cloud-controller-manager",
},
Name: "cloud-controller-manager",
Namespace: "kube-system",
},
Spec: apps.DaemonSetSpec{
Selector: &meta.LabelSelector{
MatchLabels: map[string]string{
"k8s-app": "cloud-controller-manager",
},
},
Template: k8s.PodTemplateSpec{
ObjectMeta: meta.ObjectMeta{
Labels: map[string]string{
"k8s-app": "cloud-controller-manager",
},
},
Spec: k8s.PodSpec{
ServiceAccountName: "cloud-controller-manager",
Containers: []k8s.Container{
{
Name: "cloud-controller-manager",
Image: image,
Command: command,
VolumeMounts: volumeMounts,
Env: env,
},
},
Volumes: volumes,
Tolerations: []k8s.Toleration{
{
Key: "node.cloudprovider.kubernetes.io/uninitialized",
Value: "true",
Effect: k8s.TaintEffectNoSchedule,
},
{
Key: "node-role.kubernetes.io/master",
Effect: k8s.TaintEffectNoSchedule,
},
{
Key: "node-role.kubernetes.io/control-plane",
Operator: k8s.TolerationOpExists,
Effect: k8s.TaintEffectNoSchedule,
},
{
Key: "node.kubernetes.io/not-ready",
Effect: k8s.TaintEffectNoSchedule,
},
},
NodeSelector: map[string]string{
"node-role.kubernetes.io/master": "",
},
},
},
},
},
}
}
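// Marshal marshals the cloud-controller-manager deployment as YAML documents.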
func (c *cloudControllerManagerDeployment) Marshal() ([]byte, error) {
return MarshalK8SResources(c)
}

View file

@ -0,0 +1,22 @@
package resources
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
k8s "k8s.io/api/core/v1"
)
func TestCloudControllerMarshalUnmarshal(t *testing.T) {
require := require.New(t)
assert := assert.New(t)
cloudControllerManagerDepl := NewDefaultCloudControllerManagerDeployment("dummy-cloudprovider", "some-image:latest", "/dummy_path", "192.0.2.0/24", []string{}, []k8s.Volume{}, []k8s.VolumeMount{}, nil)
data, err := cloudControllerManagerDepl.Marshal()
require.NoError(err)
var recreated cloudControllerManagerDeployment
require.NoError(UnmarshalK8SResources(data, &recreated))
assert.Equal(cloudControllerManagerDepl, &recreated)
}

View file

@ -0,0 +1,180 @@
package resources
import (
apps "k8s.io/api/apps/v1"
k8s "k8s.io/api/core/v1"
rbac "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/api/resource"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
)
type cloudNodeManagerDeployment struct {
ServiceAccount k8s.ServiceAccount
ClusterRole rbac.ClusterRole
ClusterRoleBinding rbac.ClusterRoleBinding
DaemonSet apps.DaemonSet
}
// NewDefaultCloudNodeManagerDeployment creates a new *cloudNodeManagerDeployment, customized for the CSP.
func NewDefaultCloudNodeManagerDeployment(image, path string, extraArgs []string) *cloudNodeManagerDeployment {
command := []string{
path,
"--node-name=$(NODE_NAME)",
}
command = append(command, extraArgs...)
return &cloudNodeManagerDeployment{
ServiceAccount: k8s.ServiceAccount{
TypeMeta: meta.TypeMeta{
APIVersion: "v1",
Kind: "ServiceAccount",
},
ObjectMeta: meta.ObjectMeta{
Name: "cloud-node-manager",
Namespace: "kube-system",
Labels: map[string]string{
"k8s-app": "cloud-node-manager",
"kubernetes.io/cluster-service": "true",
"addonmanager.kubernetes.io/mode": "Reconcile",
},
},
},
ClusterRole: rbac.ClusterRole{
TypeMeta: meta.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRole",
},
ObjectMeta: meta.ObjectMeta{
Name: "cloud-node-manager",
Labels: map[string]string{
"k8s-app": "cloud-node-manager",
"kubernetes.io/cluster-service": "true",
"addonmanager.kubernetes.io/mode": "Reconcile",
},
},
Rules: []rbac.PolicyRule{
{
APIGroups: []string{""},
Resources: []string{"nodes"},
Verbs: []string{"watch", "list", "get", "update", "patch"},
},
{
APIGroups: []string{""},
Resources: []string{"nodes/status"},
Verbs: []string{"patch"},
},
},
},
ClusterRoleBinding: rbac.ClusterRoleBinding{
TypeMeta: meta.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRoleBinding",
},
ObjectMeta: meta.ObjectMeta{
Name: "cloud-node-manager",
Labels: map[string]string{
"k8s-app": "cloud-node-manager",
"kubernetes.io/cluster-service": "true",
"addonmanager.kubernetes.io/mode": "Reconcile",
},
},
RoleRef: rbac.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: "cloud-node-manager",
},
Subjects: []rbac.Subject{
{
Kind: "ServiceAccount",
Name: "cloud-node-manager",
Namespace: "kube-system",
},
},
},
DaemonSet: apps.DaemonSet{
TypeMeta: meta.TypeMeta{
APIVersion: "apps/v1",
Kind: "DaemonSet",
},
ObjectMeta: meta.ObjectMeta{
Name: "cloud-node-manager",
Namespace: "kube-system",
Labels: map[string]string{
"component": "cloud-node-manager",
"kubernetes.io/cluster-service": "true",
"addonmanager.kubernetes.io/mode": "Reconcile",
},
},
Spec: apps.DaemonSetSpec{
Selector: &meta.LabelSelector{
MatchLabels: map[string]string{"k8s-app": "cloud-node-manager"},
},
Template: k8s.PodTemplateSpec{
ObjectMeta: meta.ObjectMeta{
Labels: map[string]string{"k8s-app": "cloud-node-manager"},
Annotations: map[string]string{"cluster-autoscaler.kubernetes.io/daemonset-pod": "true"},
},
Spec: k8s.PodSpec{
PriorityClassName: "system-node-critical",
ServiceAccountName: "cloud-node-manager",
HostNetwork: true,
NodeSelector: map[string]string{"kubernetes.io/os": "linux"},
Tolerations: []k8s.Toleration{
{
Key: "CriticalAddonsOnly",
Operator: k8s.TolerationOpExists,
},
{
Key: "node-role.kubernetes.io/master",
Operator: k8s.TolerationOpEqual,
Value: "true",
Effect: k8s.TaintEffectNoSchedule,
},
{
Key: "node-role.kubernetes.io/control-plane",
Operator: k8s.TolerationOpExists,
Effect: k8s.TaintEffectNoSchedule,
},
{
Operator: k8s.TolerationOpExists,
Effect: k8s.TaintEffectNoExecute,
},
{
Operator: k8s.TolerationOpExists,
Effect: k8s.TaintEffectNoSchedule,
},
},
Containers: []k8s.Container{
{
Name: "cloud-node-manager",
Image: image,
ImagePullPolicy: k8s.PullIfNotPresent,
Command: command,
Env: []k8s.EnvVar{
{
Name: "NODE_NAME",
ValueFrom: &k8s.EnvVarSource{
FieldRef: &k8s.ObjectFieldSelector{
FieldPath: "spec.nodeName",
},
},
},
},
Resources: k8s.ResourceRequirements{
Requests: k8s.ResourceList{
k8s.ResourceCPU: resource.MustParse("50m"),
k8s.ResourceMemory: resource.MustParse("50Mi"),
},
},
},
},
},
},
},
},
}
}
// Marshal marshals the cloud-node-manager deployment as YAML documents.
func (c *cloudNodeManagerDeployment) Marshal() ([]byte, error) {
return MarshalK8SResources(c)
}

View file

@ -0,0 +1,21 @@
package resources
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestCloudNodeManagerMarshalUnmarshal(t *testing.T) {
require := require.New(t)
assert := assert.New(t)
cloudNodeManagerDepl := NewDefaultCloudNodeManagerDeployment("image", "path", []string{})
data, err := cloudNodeManagerDepl.Marshal()
require.NoError(err)
var recreated cloudNodeManagerDeployment
require.NoError(UnmarshalK8SResources(data, &recreated))
assert.Equal(cloudNodeManagerDepl, &recreated)
}

View file

@ -0,0 +1,505 @@
package resources
import (
"google.golang.org/protobuf/proto"
apps "k8s.io/api/apps/v1"
k8s "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1"
rbac "k8s.io/api/rbac/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
type autoscalerDeployment struct {
PodDisruptionBudget policy.PodDisruptionBudget
ServiceAccount k8s.ServiceAccount
ClusterRole rbac.ClusterRole
ClusterRoleBinding rbac.ClusterRoleBinding
Role rbac.Role
RoleBinding rbac.RoleBinding
Service k8s.Service
Deployment apps.Deployment
}
// NewDefaultAutoscalerDeployment creates a new *autoscalerDeployment, customized for the CSP.
func NewDefaultAutoscalerDeployment(extraVolumes []k8s.Volume, extraVolumeMounts []k8s.VolumeMount, env []k8s.EnvVar) *autoscalerDeployment {
return &autoscalerDeployment{
PodDisruptionBudget: policy.PodDisruptionBudget{
TypeMeta: v1.TypeMeta{
APIVersion: "policy/v1",
Kind: "PodDisruptionBudget",
},
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "cluster-autoscaler",
"app.kubernetes.io/managed-by": "Constellation",
},
Name: "constellation-cluster-autoscaler",
Namespace: "default",
},
Spec: policy.PodDisruptionBudgetSpec{
Selector: &v1.LabelSelector{
MatchLabels: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "cluster-autoscaler",
},
},
MaxUnavailable: &intstr.IntOrString{
Type: intstr.Int,
IntVal: 1,
},
},
},
ServiceAccount: k8s.ServiceAccount{
TypeMeta: v1.TypeMeta{
APIVersion: "v1",
Kind: "ServiceAccount",
},
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "cluster-autoscaler",
"app.kubernetes.io/managed-by": "Constellation",
},
Name: "constellation-cluster-autoscaler",
Namespace: "kube-system",
},
AutomountServiceAccountToken: proto.Bool(true),
},
ClusterRole: rbac.ClusterRole{
TypeMeta: v1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRole",
},
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "cluster-autoscaler",
"app.kubernetes.io/managed-by": "Constellation",
},
Name: "constellation-cluster-autoscaler",
},
Rules: []rbac.PolicyRule{
{
APIGroups: []string{""},
Resources: []string{
"events",
"endpoints",
},
Verbs: []string{
"create",
"patch",
},
},
{
APIGroups: []string{""},
Resources: []string{
"pods/eviction",
},
Verbs: []string{
"create",
},
},
{
APIGroups: []string{""},
Resources: []string{
"pods/status",
},
Verbs: []string{
"update",
},
},
{
APIGroups: []string{""},
Resources: []string{
"endpoints",
},
ResourceNames: []string{
"cluster-autoscaler",
},
Verbs: []string{
"get",
"update",
},
},
{
APIGroups: []string{""},
Resources: []string{
"nodes",
},
Verbs: []string{
"watch",
"list",
"get",
"update",
},
},
{
APIGroups: []string{""},
Resources: []string{
"namespaces",
"pods",
"services",
"replicationcontrollers",
"persistentvolumeclaims",
"persistentvolumes",
},
Verbs: []string{
"watch",
"list",
"get",
},
},
{
APIGroups: []string{
"batch",
},
Resources: []string{
"jobs",
"cronjobs",
},
Verbs: []string{
"watch",
"list",
"get",
},
},
{
APIGroups: []string{
"batch",
"extensions",
},
Resources: []string{
"jobs",
},
Verbs: []string{
"get",
"list",
"patch",
"watch",
},
},
{
APIGroups: []string{
"extensions",
},
Resources: []string{
"replicasets",
"daemonsets",
},
Verbs: []string{
"watch",
"list",
"get",
},
},
{
APIGroups: []string{
"policy",
},
Resources: []string{
"poddisruptionbudgets",
},
Verbs: []string{
"watch",
"list",
},
},
{
APIGroups: []string{
"apps",
},
Resources: []string{
"daemonsets",
"replicasets",
"statefulsets",
},
Verbs: []string{
"watch",
"list",
"get",
},
},
{
APIGroups: []string{
"storage.k8s.io",
},
Resources: []string{
"storageclasses",
"csinodes",
"csidrivers",
"csistoragecapacities",
},
Verbs: []string{
"watch",
"list",
"get",
},
},
{
APIGroups: []string{""},
Resources: []string{
"configmaps",
},
Verbs: []string{
"list",
"watch",
},
},
{
APIGroups: []string{
"coordination.k8s.io",
},
Resources: []string{
"leases",
},
Verbs: []string{
"create",
},
},
{
APIGroups: []string{
"coordination.k8s.io",
},
ResourceNames: []string{
"cluster-autoscaler",
},
Resources: []string{
"leases",
},
Verbs: []string{
"get",
"update",
},
},
},
},
ClusterRoleBinding: rbac.ClusterRoleBinding{
TypeMeta: v1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRoleBinding",
},
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "cluster-autoscaler",
"app.kubernetes.io/managed-by": "Constellation",
},
Name: "constellation-cluster-autoscaler",
Namespace: "kube-system",
},
RoleRef: rbac.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: "constellation-cluster-autoscaler",
},
Subjects: []rbac.Subject{
{
Kind: "ServiceAccount",
Name: "constellation-cluster-autoscaler",
Namespace: "kube-system",
},
},
},
Role: rbac.Role{
TypeMeta: v1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "Role",
},
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "cluster-autoscaler",
"app.kubernetes.io/managed-by": "Constellation",
},
Name: "constellation-cluster-autoscaler",
Namespace: "kube-system",
},
Rules: []rbac.PolicyRule{
{
APIGroups: []string{""},
Resources: []string{
"configmaps",
},
Verbs: []string{
"create",
},
},
{
APIGroups: []string{""},
Resources: []string{
"configmaps",
},
ResourceNames: []string{
"cluster-autoscaler-status",
},
Verbs: []string{
"delete",
"get",
"update",
},
},
},
},
RoleBinding: rbac.RoleBinding{
TypeMeta: v1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "RoleBinding",
},
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "cluster-autoscaler",
"app.kubernetes.io/managed-by": "Constellation",
},
Name: "constellation-cluster-autoscaler",
Namespace: "kube-system",
},
RoleRef: rbac.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "Role",
Name: "constellation-cluster-autoscaler",
},
Subjects: []rbac.Subject{
{
Kind: "ServiceAccount",
Name: "constellation-cluster-autoscaler",
Namespace: "kube-system",
},
},
},
Service: k8s.Service{
TypeMeta: v1.TypeMeta{
APIVersion: "v1",
Kind: "Service",
},
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "cluster-autoscaler",
"app.kubernetes.io/managed-by": "Constellation",
},
Name: "constellation-cluster-autoscaler",
Namespace: "default",
},
Spec: k8s.ServiceSpec{
Ports: []k8s.ServicePort{
{
Port: 8085,
Protocol: k8s.ProtocolTCP,
TargetPort: intstr.FromInt(8085),
Name: "http",
},
},
Selector: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "cluster-autoscaler",
},
Type: k8s.ServiceTypeClusterIP,
},
},
Deployment: apps.Deployment{
TypeMeta: v1.TypeMeta{
APIVersion: "apps/v1",
Kind: "Deployment",
},
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "cluster-autoscaler",
"app.kubernetes.io/managed-by": "Constellation",
},
Name: "constellation-cluster-autoscaler",
Namespace: "kube-system",
},
Spec: apps.DeploymentSpec{
Replicas: proto.Int32(1),
Selector: &v1.LabelSelector{
MatchLabels: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "cluster-autoscaler",
},
},
Template: k8s.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "cluster-autoscaler",
},
},
Spec: k8s.PodSpec{
PriorityClassName: "system-cluster-critical",
DNSPolicy: k8s.DNSClusterFirst,
Containers: []k8s.Container{
{
Name: "cluster-autoscaler",
Image: clusterAutoscalerImage,
ImagePullPolicy: k8s.PullIfNotPresent,
LivenessProbe: &k8s.Probe{
ProbeHandler: k8s.ProbeHandler{
HTTPGet: &k8s.HTTPGetAction{
Path: "/health-check",
Port: intstr.FromInt(8085),
},
},
},
Ports: []k8s.ContainerPort{
{
ContainerPort: 8085,
},
},
VolumeMounts: extraVolumeMounts,
Env: env,
},
},
Volumes: extraVolumes,
ServiceAccountName: "constellation-cluster-autoscaler",
Tolerations: []k8s.Toleration{
{
Key: "node-role.kubernetes.io/master",
Operator: k8s.TolerationOpExists,
Effect: k8s.TaintEffectNoSchedule,
},
{
Key: "node-role.kubernetes.io/control-plane",
Operator: k8s.TolerationOpExists,
Effect: k8s.TaintEffectNoSchedule,
},
{
Key: "node.cloudprovider.kubernetes.io/uninitialized",
Operator: k8s.TolerationOpEqual,
Value: "true",
Effect: k8s.TaintEffectNoSchedule,
},
},
NodeSelector: map[string]string{
"node-role.kubernetes.io/control-plane": "",
},
},
},
},
},
}
}
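// Marshal marshals the autoscaler deployment as YAML documents.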
func (a *autoscalerDeployment) Marshal() ([]byte, error) {
return MarshalK8SResources(a)
}
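// SetAutoscalerCommand sets the cluster-autoscaler command for the given cloud provider and autoscaling node groups.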
func (a *autoscalerDeployment) SetAutoscalerCommand(cloudprovider string, autoscalingNodeGroups []string) {
command := []string{
"./cluster-autoscaler",
"--cloud-provider",
cloudprovider,
"--logtostderr=true",
"--stderrthreshold=info",
"--v=2",
"--namespace=kube-system",
}
for _, autoscalingNodeGroup := range autoscalingNodeGroups {
command = append(command, "--nodes", autoscalingNodeGroup)
}
a.Deployment.Spec.Template.Spec.Containers[0].Command = command
}
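A minimal usage sketch (hypothetical provider and node groups, not part of this commit). Each entry is passed verbatim to --nodes; the exact specifier format (e.g. min:max:name on some providers) depends on the cloud provider:
// exampleAutoscalerYAML configures the autoscaler command for two scaling
// groups and marshals the full resource set to YAML.
func exampleAutoscalerYAML() ([]byte, error) {
	depl := NewDefaultAutoscalerDeployment(nil, nil, nil)
	depl.SetAutoscalerCommand("gce", []string{"1:10:worker-group-a", "1:5:worker-group-b"})
	return depl.Marshal()
}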

View file

@ -0,0 +1,41 @@
package resources
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestAutoscalerDeploymentMarshalUnmarshal(t *testing.T) {
require := require.New(t)
assert := assert.New(t)
autoscalerDepl := NewDefaultAutoscalerDeployment(nil, nil, nil)
data, err := autoscalerDepl.Marshal()
require.NoError(err)
t.Log(string(data))
var recreated autoscalerDeployment
require.NoError(UnmarshalK8SResources(data, &recreated))
assert.Equal(autoscalerDepl, &recreated)
}
func TestAutoscalerDeploymentWithCommandMarshalUnmarshal(t *testing.T) {
require := require.New(t)
assert := assert.New(t)
autoscalerDepl := NewDefaultAutoscalerDeployment(nil, nil, nil)
autoscalerDepl.SetAutoscalerCommand("someProvider", []string{"group1", "group2"})
data, err := autoscalerDepl.Marshal()
require.NoError(err)
t.Log(string(data))
var recreated autoscalerDeployment
require.NoError(UnmarshalK8SResources(data, &recreated))
assert.Equal(autoscalerDepl, &recreated)
}

View file

@ -0,0 +1,18 @@
package resources
import (
k8s "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// ConfigMaps represents a list of k8s ConfigMaps.
type ConfigMaps []*k8s.ConfigMap
// Marshal marshals config maps into multiple YAML documents.
func (s ConfigMaps) Marshal() ([]byte, error) {
objects := make([]runtime.Object, len(s))
for i := range s {
objects[i] = s[i]
}
return MarshalK8SResourcesList(objects)
}

View file

@ -0,0 +1,49 @@
package resources
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
k8s "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func TestConfigMaps(t *testing.T) {
require := require.New(t)
assert := assert.New(t)
configMaps := ConfigMaps{
&k8s.ConfigMap{
TypeMeta: v1.TypeMeta{
Kind: "ConfigMap",
APIVersion: "v1",
},
Data: map[string]string{"key": "value1"},
},
&k8s.ConfigMap{
TypeMeta: v1.TypeMeta{
Kind: "ConfigMap",
APIVersion: "v1",
},
Data: map[string]string{"key": "value2"},
},
}
data, err := configMaps.Marshal()
require.NoError(err)
assert.Equal(`apiVersion: v1
data:
key: value1
kind: ConfigMap
metadata:
creationTimestamp: null
---
apiVersion: v1
data:
key: value2
kind: ConfigMap
metadata:
creationTimestamp: null
`, string(data))
}

View file

@ -0,0 +1,32 @@
package resources
import (
"encoding/base64"
"fmt"
"github.com/edgelesssys/constellation/internal/secrets"
k8s "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// NewImagePullSecret creates a new k8s.Secret from the config for authenticating when pulling images.
func NewImagePullSecret() k8s.Secret {
base64EncodedSecret := base64.StdEncoding.EncodeToString(
[]byte(fmt.Sprintf("%s:%s", secrets.PullSecretUser, secrets.PullSecretToken)),
)
pullSecretDockerCfgJSON := fmt.Sprintf(`{"auths":{"ghcr.io":{"auth":"%s"}}}`, base64EncodedSecret)
return k8s.Secret{
TypeMeta: meta.TypeMeta{
APIVersion: "v1",
Kind: "Secret",
},
ObjectMeta: meta.ObjectMeta{
Name: secrets.PullSecretName,
Namespace: "kube-system",
},
StringData: map[string]string{".dockerconfigjson": pullSecretDockerCfgJSON},
Type: "kubernetes.io/dockerconfigjson",
}
}

View file

@ -0,0 +1,13 @@
package resources
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestImagePullSecret(t *testing.T) {
imgPullSec := NewImagePullSecret()
_, err := imgPullSec.Marshal()
assert.NoError(t, err)
}

View file

@ -0,0 +1,12 @@
package resources
const (
// Constellation images.
activationImage = "ghcr.io/edgelesssys/constellation/activation-service:v1.2"
accessManagerImage = "ghcr.io/edgelesssys/constellation/access-manager:v1.2"
kmsImage = "ghcr.io/edgelesssys/constellation/kmsserver:v1.2"
verificationImage = "ghcr.io/edgelesssys/constellation/verification-service:v1.2"
// external images.
clusterAutoscalerImage = "k8s.gcr.io/autoscaling/cluster-autoscaler:v1.23.0"
)

View file

@ -0,0 +1,268 @@
package resources
import (
"fmt"
"github.com/edgelesssys/constellation/internal/constants"
"github.com/edgelesssys/constellation/internal/secrets"
apps "k8s.io/api/apps/v1"
k8s "k8s.io/api/core/v1"
rbac "k8s.io/api/rbac/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
type kmsDeployment struct {
ServiceAccount k8s.ServiceAccount
ServiceInternal k8s.Service
ServiceExternal k8s.Service
ClusterRole rbac.ClusterRole
ClusterRoleBinding rbac.ClusterRoleBinding
Deployment apps.Deployment
MasterSecret k8s.Secret
ImagePullSecret k8s.Secret
}
// NewKMSDeployment creates a new *kmsDeployment to use as the key management system inside Constellation.
func NewKMSDeployment(csp string, masterSecret []byte) *kmsDeployment {
return &kmsDeployment{
ServiceAccount: k8s.ServiceAccount{
TypeMeta: meta.TypeMeta{
APIVersion: "v1",
Kind: "ServiceAccount",
},
ObjectMeta: meta.ObjectMeta{
Name: "kms",
Namespace: "kube-system",
},
},
ServiceInternal: k8s.Service{
TypeMeta: meta.TypeMeta{
APIVersion: "v1",
Kind: "Service",
},
ObjectMeta: meta.ObjectMeta{
Name: "kms",
Namespace: "kube-system",
},
Spec: k8s.ServiceSpec{
Type: k8s.ServiceTypeClusterIP,
Ports: []k8s.ServicePort{
{
Name: "grpc",
Protocol: k8s.ProtocolTCP,
Port: constants.KMSPort,
TargetPort: intstr.FromInt(constants.KMSPort),
},
},
Selector: map[string]string{
"k8s-app": "kms",
},
},
},
ServiceExternal: k8s.Service{
TypeMeta: meta.TypeMeta{
APIVersion: "v1",
Kind: "Service",
},
ObjectMeta: meta.ObjectMeta{
Name: "kms-external",
Namespace: "kube-system",
},
Spec: k8s.ServiceSpec{
Type: k8s.ServiceTypeNodePort,
Ports: []k8s.ServicePort{
{
Name: "atls",
Protocol: k8s.ProtocolTCP,
Port: constants.KMSATLSPort,
TargetPort: intstr.FromInt(constants.KMSATLSPort),
NodePort: constants.KMSNodePort,
},
},
Selector: map[string]string{
"k8s-app": "kms",
},
},
},
ClusterRole: rbac.ClusterRole{
TypeMeta: meta.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRole",
},
ObjectMeta: meta.ObjectMeta{
Name: "kms",
Labels: map[string]string{
"k8s-app": "kms",
},
},
Rules: []rbac.PolicyRule{
{
APIGroups: []string{""},
Resources: []string{"secrets"},
Verbs: []string{"get"},
},
},
},
ClusterRoleBinding: rbac.ClusterRoleBinding{
TypeMeta: meta.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRoleBinding",
},
ObjectMeta: meta.ObjectMeta{
Name: "kms",
},
RoleRef: rbac.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: "kms",
},
Subjects: []rbac.Subject{
{
Kind: "ServiceAccount",
Name: "kms",
Namespace: "kube-system",
},
},
},
Deployment: apps.Deployment{
TypeMeta: meta.TypeMeta{
APIVersion: "apps/v1",
Kind: "Deployment",
},
ObjectMeta: meta.ObjectMeta{
Labels: map[string]string{
"k8s-app": "kms",
},
Name: "kms",
Namespace: "kube-system",
},
Spec: apps.DeploymentSpec{
Selector: &meta.LabelSelector{
MatchLabels: map[string]string{
"k8s-app": "kms",
},
},
Template: k8s.PodTemplateSpec{
ObjectMeta: meta.ObjectMeta{
Labels: map[string]string{
"k8s-app": "kms",
},
},
Spec: k8s.PodSpec{
PriorityClassName: "system-cluster-critical",
Tolerations: []k8s.Toleration{
{
Key: "CriticalAddonsOnly",
Operator: k8s.TolerationOpExists,
},
{
Key: "node-role.kubernetes.io/master",
Operator: k8s.TolerationOpEqual,
Value: "true",
Effect: k8s.TaintEffectNoSchedule,
},
{
Key: "node-role.kubernetes.io/control-plane",
Operator: k8s.TolerationOpExists,
Effect: k8s.TaintEffectNoSchedule,
},
{
Operator: k8s.TolerationOpExists,
Effect: k8s.TaintEffectNoExecute,
},
{
Operator: k8s.TolerationOpExists,
Effect: k8s.TaintEffectNoSchedule,
},
},
// Only run on control plane nodes
NodeSelector: map[string]string{
"node-role.kubernetes.io/master": "",
},
ImagePullSecrets: []k8s.LocalObjectReference{
{
Name: secrets.PullSecretName,
},
},
Volumes: []k8s.Volume{
{
Name: "config",
VolumeSource: k8s.VolumeSource{
Projected: &k8s.ProjectedVolumeSource{
Sources: []k8s.VolumeProjection{
{
ConfigMap: &k8s.ConfigMapProjection{
LocalObjectReference: k8s.LocalObjectReference{
Name: "activation-config",
},
Items: []k8s.KeyToPath{
{
Key: constants.MeasurementsFilename,
Path: constants.MeasurementsFilename,
},
},
},
},
{
Secret: &k8s.SecretProjection{
LocalObjectReference: k8s.LocalObjectReference{
Name: constants.ConstellationMasterSecretStoreName,
},
Items: []k8s.KeyToPath{
{
Key: constants.ConstellationMasterSecretKey,
Path: constants.MasterSecretFilename,
},
},
},
},
},
},
},
},
},
ServiceAccountName: "kms",
Containers: []k8s.Container{
{
Name: "kms",
Image: kmsImage,
Args: []string{
fmt.Sprintf("--atls-port=%d", constants.KMSATLSPort),
fmt.Sprintf("--port=%d", constants.KMSPort),
fmt.Sprintf("--cloud-provider=%s", csp),
},
VolumeMounts: []k8s.VolumeMount{
{
Name: "config",
ReadOnly: true,
MountPath: constants.ServiceBasePath,
},
},
},
},
},
},
},
},
MasterSecret: k8s.Secret{
TypeMeta: meta.TypeMeta{
APIVersion: "v1",
Kind: "Secret",
},
ObjectMeta: meta.ObjectMeta{
Name: constants.ConstellationMasterSecretStoreName,
Namespace: "kube-system",
},
Data: map[string][]byte{
constants.ConstellationMasterSecretKey: masterSecret,
},
Type: "Opaque",
},
ImagePullSecret: NewImagePullSecret(),
}
}
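// Marshal marshals the KMS deployment as YAML documents.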
func (c *kmsDeployment) Marshal() ([]byte, error) {
return MarshalK8SResources(c)
}

View file

@ -0,0 +1,22 @@
package resources
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestKMSMarshalUnmarshal(t *testing.T) {
require := require.New(t)
assert := assert.New(t)
testMS := []byte{0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8}
kmsDepl := NewKMSDeployment("test", testMS)
data, err := kmsDepl.Marshal()
require.NoError(err)
var recreated kmsDeployment
require.NoError(UnmarshalK8SResources(data, &recreated))
assert.Equal(kmsDepl, &recreated)
}

View file

@ -0,0 +1,149 @@
package resources
import (
"bytes"
"errors"
"fmt"
"io"
"reflect"
"gopkg.in/yaml.v3"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/runtime/serializer/json"
"k8s.io/client-go/kubernetes/scheme"
)
// Marshaler is used by all k8s resources that can be marshaled to YAML.
type Marshaler interface {
Marshal() ([]byte, error)
}
// MarshalK8SResources marshals every field of a struct into a k8s resource YAML.
func MarshalK8SResources(resources any) ([]byte, error) {
if resources == nil {
return nil, errors.New("marshal on nil called")
}
serializer := json.NewYAMLSerializer(json.DefaultMetaFactory, nil, nil)
var buf bytes.Buffer
// reflect over struct containing fields that are k8s resources
value := reflect.ValueOf(resources)
if value.Kind() != reflect.Ptr && value.Kind() != reflect.Interface {
return nil, errors.New("marshal on non-pointer called")
}
elem := value.Elem()
if elem.Kind() == reflect.Struct {
// iterate over all struct fields
for i := 0; i < elem.NumField(); i++ {
field := elem.Field(i)
var inter any
// check if value can be converted to interface
if field.CanInterface() {
inter = field.Addr().Interface()
} else {
continue
}
// convert field interface to runtime.Object
obj, ok := inter.(runtime.Object)
if !ok {
continue
}
if i > 0 {
// separate YAML documents
buf.Write([]byte("---\n"))
}
// serialize k8s resource
if err := serializer.Encode(obj, &buf); err != nil {
return nil, err
}
}
}
return buf.Bytes(), nil
}
// UnmarshalK8SResources takes YAML and converts it into a k8s resources struct.
func UnmarshalK8SResources(data []byte, into any) error {
if into == nil {
return errors.New("unmarshal on nil called")
}
// reflect over struct containing fields that are k8s resources
value := reflect.ValueOf(into).Elem()
if value.Kind() != reflect.Struct {
return errors.New("can only reflect over struct")
}
decoder := serializer.NewCodecFactory(scheme.Scheme).UniversalDecoder()
documents, err := splitYAML(data)
if err != nil {
return fmt.Errorf("splitting deployment YAML into multiple documents: %w", err)
}
if len(documents) != value.NumField() {
return fmt.Errorf("expected %v YAML documents, got %v", value.NumField(), len(documents))
}
for i := 0; i < value.NumField(); i++ {
field := value.Field(i)
var inter any
// check if value can be converted to interface
if !field.CanInterface() {
return fmt.Errorf("cannot use struct field %v as interface", i)
}
inter = field.Addr().Interface()
// convert field interface to runtime.Object
obj, ok := inter.(runtime.Object)
if !ok {
return fmt.Errorf("cannot convert struct field %v as k8s runtime object", i)
}
// decode YAML document into struct field
if err := runtime.DecodeInto(decoder, documents[i], obj); err != nil {
return err
}
}
return nil
}
// MarshalK8SResourcesList marshals every element of a slice into a k8s resource YAML.
func MarshalK8SResourcesList(resources []runtime.Object) ([]byte, error) {
serializer := json.NewYAMLSerializer(json.DefaultMetaFactory, nil, nil)
var buf bytes.Buffer
for i, obj := range resources {
if i > 0 {
// separate YAML documents
buf.Write([]byte("---\n"))
}
// serialize k8s resource
if err := serializer.Encode(obj, &buf); err != nil {
return nil, err
}
}
return buf.Bytes(), nil
}
// splitYAML splits a YAML multidoc into a slice of multiple YAML docs.
func splitYAML(resources []byte) ([][]byte, error) {
dec := yaml.NewDecoder(bytes.NewReader(resources))
var res [][]byte
for {
var value any
err := dec.Decode(&value)
if errors.Is(err, io.EOF) {
break
}
if err != nil {
return nil, err
}
valueBytes, err := yaml.Marshal(value)
if err != nil {
return nil, err
}
res = append(res, valueBytes)
}
return res, nil
}
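A minimal round-trip sketch (hypothetical helper, not part of this commit), mirroring the package tests; k8s and v1 alias "k8s.io/api/core/v1" and "k8s.io/apimachinery/pkg/apis/meta/v1" as in the test file:
// exampleRoundTrip marshals a struct of k8s resources to YAML and decodes it
// back; YAML document order must match struct field order.
func exampleRoundTrip() error {
	type bundle struct {
		ConfigMap k8s.ConfigMap
	}
	in := bundle{ConfigMap: k8s.ConfigMap{
		TypeMeta: v1.TypeMeta{Kind: "ConfigMap", APIVersion: "v1"},
		Data:     map[string]string{"key": "value"},
	}}
	data, err := MarshalK8SResources(&in)
	if err != nil {
		return err
	}
	var out bundle
	return UnmarshalK8SResources(data, &out)
}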

View file

@ -0,0 +1,360 @@
package resources
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/protobuf/proto"
k8s "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
func TestMarshalK8SResources(t *testing.T) {
testCases := map[string]struct {
resources any
wantErr bool
wantYAML string
}{
"ConfigMap as only field can be marshaled": {
resources: &struct {
ConfigMap k8s.ConfigMap
}{
ConfigMap: k8s.ConfigMap{
TypeMeta: v1.TypeMeta{
Kind: "ConfigMap",
APIVersion: "v1",
},
Data: map[string]string{
"key": "value",
},
},
},
wantYAML: `apiVersion: v1
data:
key: value
kind: ConfigMap
metadata:
creationTimestamp: null
`,
},
"Multiple fields are correctly encoded": {
resources: &struct {
ConfigMap k8s.ConfigMap
Secret k8s.Secret
}{
ConfigMap: k8s.ConfigMap{
TypeMeta: v1.TypeMeta{
Kind: "ConfigMap",
APIVersion: "v1",
},
Data: map[string]string{
"key": "value",
},
},
Secret: k8s.Secret{
TypeMeta: v1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
Data: map[string][]byte{
"key": []byte("value"),
},
},
},
wantYAML: `apiVersion: v1
data:
key: value
kind: ConfigMap
metadata:
creationTimestamp: null
---
apiVersion: v1
data:
key: dmFsdWU=
kind: Secret
metadata:
creationTimestamp: null
`,
},
"Non-pointer is detected": {
resources: "non-pointer",
wantErr: true,
},
"Nil resource pointer is detected": {
resources: nil,
wantErr: true,
},
"Non-pointer field is ignored": {
resources: &struct{ String string }{String: "somestring"},
},
"nil field is ignored": {
resources: &struct {
ConfigMap *k8s.ConfigMap
}{
ConfigMap: nil,
},
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
yaml, err := MarshalK8SResources(tc.resources)
if tc.wantErr {
assert.Error(err)
return
}
require.NoError(err)
assert.Equal(tc.wantYAML, string(yaml))
})
}
}
func TestUnmarshalK8SResources(t *testing.T) {
testCases := map[string]struct {
data string
into any
wantObj any
wantErr bool
}{
"ConfigMap as only field can be unmarshaled": {
data: `apiVersion: v1
data:
key: value
kind: ConfigMap
metadata:
creationTimestamp: null
`,
into: &struct {
ConfigMap k8s.ConfigMap
}{},
wantObj: &struct {
ConfigMap k8s.ConfigMap
}{
ConfigMap: k8s.ConfigMap{
TypeMeta: v1.TypeMeta{
Kind: "ConfigMap",
APIVersion: "v1",
},
Data: map[string]string{
"key": "value",
},
},
},
},
"Multiple fields are correctly unmarshaled": {
data: `apiVersion: v1
data:
key: value
kind: ConfigMap
metadata:
creationTimestamp: null
---
apiVersion: v1
data:
key: dmFsdWU=
kind: Secret
metadata:
creationTimestamp: null
`,
into: &struct {
ConfigMap k8s.ConfigMap
Secret k8s.Secret
}{},
wantObj: &struct {
ConfigMap k8s.ConfigMap
Secret k8s.Secret
}{
ConfigMap: k8s.ConfigMap{
TypeMeta: v1.TypeMeta{
Kind: "ConfigMap",
APIVersion: "v1",
},
Data: map[string]string{
"key": "value",
},
},
Secret: k8s.Secret{
TypeMeta: v1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
Data: map[string][]byte{
"key": []byte("value"),
},
},
},
},
"Mismatching amount of fields is detected": {
data: `apiVersion: v1
data:
key: value
kind: ConfigMap
metadata:
creationTimestamp: null
---
apiVersion: v1
data:
key: dmFsdWU=
kind: Secret
metadata:
creationTimestamp: null
`,
into: &struct {
ConfigMap k8s.ConfigMap
}{},
wantErr: true,
},
"Non-struct pointer is detected": {
into: proto.String("test"),
wantErr: true,
},
"Nil into is detected": {
into: nil,
wantErr: true,
},
"Invalid yaml is detected": {
data: `duplicateKey: value
duplicateKey: value`,
into: &struct {
ConfigMap k8s.ConfigMap
}{},
wantErr: true,
},
"Struct field cannot interface with runtime.Object": {
data: `apiVersion: v1
data:
key: value
kind: ConfigMap
metadata:
creationTimestamp: null
`,
into: &struct {
String string
}{},
wantErr: true,
},
"Struct field mismatch": {
data: `apiVersion: v1
data:
key: value
kind: ConfigMap
metadata:
creationTimestamp: null
`,
into: &struct {
Secret k8s.Secret
}{},
wantErr: true,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
err := UnmarshalK8SResources([]byte(tc.data), tc.into)
if tc.wantErr {
assert.Error(err)
return
}
require.NoError(err)
assert.Equal(tc.wantObj, tc.into)
})
}
}
func TestMarshalK8SResourcesList(t *testing.T) {
testCases := map[string]struct {
resources []runtime.Object
wantErr bool
wantYAML string
}{
"ConfigMap as only element be marshaled": {
resources: []runtime.Object{
&k8s.ConfigMap{
TypeMeta: v1.TypeMeta{
Kind: "ConfigMap",
APIVersion: "v1",
},
Data: map[string]string{
"key": "value",
},
},
},
wantYAML: `apiVersion: v1
data:
key: value
kind: ConfigMap
metadata:
creationTimestamp: null
`,
},
"Multiple fields are correctly encoded": {
resources: []runtime.Object{
&k8s.ConfigMap{
TypeMeta: v1.TypeMeta{
Kind: "ConfigMap",
APIVersion: "v1",
},
Data: map[string]string{
"key": "value",
},
},
&k8s.Secret{
TypeMeta: v1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
Data: map[string][]byte{
"key": []byte("value"),
},
},
},
wantYAML: `apiVersion: v1
data:
key: value
kind: ConfigMap
metadata:
creationTimestamp: null
---
apiVersion: v1
data:
key: dmFsdWU=
kind: Secret
metadata:
creationTimestamp: null
`,
},
"Nil resource pointer is encodes": {
resources: []runtime.Object{nil},
wantYAML: "null\n",
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
yaml, err := MarshalK8SResourcesList(tc.resources)
if tc.wantErr {
assert.Error(err)
return
}
require.NoError(err)
assert.Equal(tc.wantYAML, string(yaml))
})
}
}

View file

@ -0,0 +1,18 @@
package resources
import (
k8s "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// Secrets represents a list of k8s Secrets.
type Secrets []*k8s.Secret
// Marshal marshals secrets into multiple YAML documents.
func (s Secrets) Marshal() ([]byte, error) {
objects := make([]runtime.Object, len(s))
for i := range s {
objects[i] = s[i]
}
return MarshalK8SResourcesList(objects)
}

View file

@ -0,0 +1,49 @@
package resources
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
k8s "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func TestSecrets(t *testing.T) {
require := require.New(t)
assert := assert.New(t)
secrets := Secrets{
&k8s.Secret{
TypeMeta: v1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
Data: map[string][]byte{"key": []byte("value1")},
},
&k8s.Secret{
TypeMeta: v1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
Data: map[string][]byte{"key": []byte("value2")},
},
}
data, err := secrets.Marshal()
require.NoError(err)
assert.Equal(`apiVersion: v1
data:
key: dmFsdWUx
kind: Secret
metadata:
creationTimestamp: null
---
apiVersion: v1
data:
key: dmFsdWUy
kind: Secret
metadata:
creationTimestamp: null
`, string(data))
}

View file

@ -0,0 +1,153 @@
package resources
import (
"fmt"
"github.com/edgelesssys/constellation/internal/constants"
"github.com/edgelesssys/constellation/internal/secrets"
apps "k8s.io/api/apps/v1"
k8s "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
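// verificationDaemonset bundles the DaemonSet and Service that make up the verification service deployment.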
type verificationDaemonset struct {
DaemonSet apps.DaemonSet
Service k8s.Service
}
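// NewVerificationDaemonSet creates the verification service DaemonSet and its Service for the given cloud service provider.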
func NewVerificationDaemonSet(csp string) *verificationDaemonset {
return &verificationDaemonset{
DaemonSet: apps.DaemonSet{
TypeMeta: meta.TypeMeta{
APIVersion: "apps/v1",
Kind: "DaemonSet",
},
ObjectMeta: meta.ObjectMeta{
Name: "verification-service",
Namespace: "kube-system",
Labels: map[string]string{
"k8s-app": "verification-service",
"component": "verification-service",
},
},
Spec: apps.DaemonSetSpec{
Selector: &meta.LabelSelector{
MatchLabels: map[string]string{
"k8s-app": "verification-service",
},
},
Template: k8s.PodTemplateSpec{
ObjectMeta: meta.ObjectMeta{
Labels: map[string]string{
"k8s-app": "verification-service",
},
},
Spec: k8s.PodSpec{
Tolerations: []k8s.Toleration{
{
Key: "node-role.kubernetes.io/master",
Operator: k8s.TolerationOpEqual,
Value: "true",
Effect: k8s.TaintEffectNoSchedule,
},
{
Key: "node-role.kubernetes.io/control-plane",
Operator: k8s.TolerationOpExists,
Effect: k8s.TaintEffectNoSchedule,
},
{
Operator: k8s.TolerationOpExists,
Effect: k8s.TaintEffectNoExecute,
},
{
Operator: k8s.TolerationOpExists,
Effect: k8s.TaintEffectNoSchedule,
},
},
ImagePullSecrets: []k8s.LocalObjectReference{
{
Name: secrets.PullSecretName,
},
},
Containers: []k8s.Container{
{
Name: "verification-service",
Image: verificationImage,
Ports: []k8s.ContainerPort{
{
Name: "http",
ContainerPort: constants.VerifyServicePortHTTP,
},
{
Name: "grpc",
ContainerPort: constants.VerifyServicePortGRPC,
},
},
SecurityContext: &k8s.SecurityContext{
Privileged: func(b bool) *bool { return &b }(true),
},
Args: []string{
fmt.Sprintf("--cloud-provider=%s", csp),
},
VolumeMounts: []k8s.VolumeMount{
{
Name: "event-log",
ReadOnly: true,
MountPath: "/sys/kernel/security/",
},
},
},
},
Volumes: []k8s.Volume{
{
Name: "event-log",
VolumeSource: k8s.VolumeSource{
HostPath: &k8s.HostPathVolumeSource{
Path: "/sys/kernel/security/",
},
},
},
},
},
},
},
},
Service: k8s.Service{
TypeMeta: meta.TypeMeta{
APIVersion: "v1",
Kind: "Service",
},
ObjectMeta: meta.ObjectMeta{
Name: "verification-service",
Namespace: "kube-system",
},
Spec: k8s.ServiceSpec{
Type: k8s.ServiceTypeNodePort,
Ports: []k8s.ServicePort{
{
Name: "http",
Protocol: k8s.ProtocolTCP,
Port: constants.VerifyServicePortHTTP,
TargetPort: intstr.FromInt(constants.VerifyServicePortHTTP),
NodePort: constants.VerifyServiceNodePortHTTP,
},
{
Name: "grpc",
Protocol: k8s.ProtocolTCP,
Port: constants.VerifyServicePortGRPC,
TargetPort: intstr.FromInt(constants.VerifyServicePortGRPC),
NodePort: constants.VerifyServiceNodePortGRPC,
},
},
Selector: map[string]string{
"k8s-app": "verification-service",
},
},
},
}
}
func (v *verificationDaemonset) Marshal() ([]byte, error) {
return MarshalK8SResources(v)
}

View file

@ -0,0 +1,18 @@
package resources
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestNewVerificationDaemonset(t *testing.T) {
deployment := NewVerificationDaemonSet("csp")
deploymentYAML, err := deployment.Marshal()
require.NoError(t, err)
var recreated verificationDaemonset
require.NoError(t, UnmarshalK8SResources(deploymentYAML, &recreated))
assert.Equal(t, deployment, &recreated)
}

View file

@ -0,0 +1,68 @@
package k8sapi
import (
"context"
"fmt"
"github.com/coreos/go-systemd/v22/dbus"
)
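// restartSystemdUnit restarts the given systemd unit via D-Bus and blocks until the restart job completes.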
func restartSystemdUnit(ctx context.Context, unit string) error {
conn, err := dbus.NewSystemdConnectionContext(ctx)
if err != nil {
return fmt.Errorf("establishing systemd connection: %w", err)
}
restartChan := make(chan string)
if _, err := conn.RestartUnitContext(ctx, unit, "replace", restartChan); err != nil {
return fmt.Errorf("restarting systemd unit %q: %w", unit, err)
}
// Wait for the restart to finish and check whether it was successful.
result := <-restartChan
switch result {
case "done":
return nil
default:
return fmt.Errorf("restarting systemd unit %q failed: expected %v but received %v", unit, "done", result)
}
}
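// startSystemdUnit starts the given systemd unit via D-Bus and blocks until the start job completes.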
func startSystemdUnit(ctx context.Context, unit string) error {
conn, err := dbus.NewSystemdConnectionContext(ctx)
if err != nil {
return fmt.Errorf("establishing systemd connection: %w", err)
}
startChan := make(chan string)
if _, err := conn.StartUnitContext(ctx, unit, "replace", startChan); err != nil {
return fmt.Errorf("starting systemd unit %q: %w", unit, err)
}
// Wait for the start to finish and check whether it was successful.
result := <-startChan
switch result {
case "done":
return nil
default:
return fmt.Errorf("starting systemd unit %q failed: expected %v but received %v", unit, "done", result)
}
}
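// enableSystemdUnit enables the systemd unit file at unitPath so that it is started on boot.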
func enableSystemdUnit(ctx context.Context, unitPath string) error {
conn, err := dbus.NewSystemdConnectionContext(ctx)
if err != nil {
return fmt.Errorf("establishing systemd connection: %w", err)
}
if _, _, err := conn.EnableUnitFilesContext(ctx, []string{unitPath}, true, true); err != nil {
return fmt.Errorf("enabling systemd unit %q: %w", unitPath, err)
}
return nil
}
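// Illustrative sketch (ensureKubeletRunning is a hypothetical helper, not part of the
// package API): the three functions above compose as in StartKubelet elsewhere in this
// package, enabling the unit file before starting the unit.
func ensureKubeletRunning(ctx context.Context) error {
if err := enableSystemdUnit(ctx, "/etc/systemd/system/kubelet.service"); err != nil {
return fmt.Errorf("enabling kubelet systemd unit: %w", err)
}
return startSystemdUnit(ctx, "kubelet.service")
}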

View file

@ -0,0 +1,363 @@
package k8sapi
import (
"context"
"errors"
"fmt"
"net/http"
"os"
"os/exec"
"regexp"
"strings"
"time"
"github.com/edgelesssys/constellation/bootstrapper/internal/kubernetes/k8sapi/resources"
kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
)
const (
// kubeConfig is the path to the Kubernetes admin config (used for authentication).
kubeConfig = "/etc/kubernetes/admin.conf"
// kubeletStartTimeout is the maximum time given to the kubelet service to (re)start.
kubeletStartTimeout = 10 * time.Minute
)
var (
kubernetesKeyRegexp = regexp.MustCompile("[a-f0-9]{64}")
providerIDRegex = regexp.MustCompile(`^azure:///subscriptions/([^/]+)/resourceGroups/([^/]+)/providers/Microsoft.Compute/virtualMachineScaleSets/([^/]+)/virtualMachines/([^/]+)$`)
)
// Client provides the functionality of `kubectl apply`.
type Client interface {
Apply(resources resources.Marshaler, forceConflicts bool) error
SetKubeconfig(kubeconfig []byte)
// TODO: add tolerations
}
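// ClusterUtil provides low-level management functions for a Kubernetes cluster.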
type ClusterUtil interface {
InstallComponents(ctx context.Context, version string) error
InitCluster(initConfig []byte) error
JoinCluster(joinConfig []byte) error
SetupPodNetwork(kubectl Client, podNetworkConfiguration resources.Marshaler) error
SetupAccessManager(kubectl Client, accessManagerConfiguration resources.Marshaler) error
SetupAutoscaling(kubectl Client, clusterAutoscalerConfiguration resources.Marshaler, secrets resources.Marshaler) error
SetupCloudControllerManager(kubectl Client, cloudControllerManagerConfiguration resources.Marshaler, configMaps resources.Marshaler, secrets resources.Marshaler) error
SetupCloudNodeManager(kubectl Client, cloudNodeManagerConfiguration resources.Marshaler) error
SetupKMS(kubectl Client, kmsConfiguration resources.Marshaler) error
StartKubelet() error
RestartKubelet() error
GetControlPlaneJoinCertificateKey() (string, error)
CreateJoinToken(ttl time.Duration) (*kubeadm.BootstrapTokenDiscovery, error)
}
// KubernetesUtil provides low-level management of the Kubernetes cluster.
type KubernetesUtil struct {
inst installer
}
// NewKubernetesUtil creates a new KubernetesUtil.
func NewKubernetesUtil() *KubernetesUtil {
return &KubernetesUtil{
inst: newOSInstaller(),
}
}
// InstallComponents installs the Kubernetes components for the specified version.
func (k *KubernetesUtil) InstallComponents(ctx context.Context, version string) error {
versionConf, ok := versionConfigs[version]
if !ok {
return fmt.Errorf("unsupported kubernetes version %q", version)
}
if err := versionConf.installK8sComponents(ctx, k.inst); err != nil {
return err
}
return enableSystemdUnit(ctx, kubeletServiceEtcPath)
}
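// InitCluster writes the default audit policy to disk and initializes a new cluster with kubeadm init.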
func (k *KubernetesUtil) InitCluster(ctx context.Context, initConfig []byte) error {
// TODO: audit policy should be user input
auditPolicy, err := resources.NewDefaultAuditPolicy().Marshal()
if err != nil {
return fmt.Errorf("generating default audit policy: %w", err)
}
if err := os.WriteFile(auditPolicyPath, auditPolicy, 0o644); err != nil {
return fmt.Errorf("writing default audit policy: %w", err)
}
initConfigFile, err := os.CreateTemp("", "kubeadm-init.*.yaml")
if err != nil {
return fmt.Errorf("creating init config file %v: %w", initConfigFile.Name(), err)
}
defer os.Remove(initConfigFile.Name())
if _, err := initConfigFile.Write(initConfig); err != nil {
return fmt.Errorf("writing kubeadm init yaml config %v: %w", initConfigFile.Name(), err)
}
cmd := exec.CommandContext(ctx, kubeadmPath, "init", "--config", initConfigFile.Name())
_, err = cmd.Output()
if err != nil {
var exitErr *exec.ExitError
if errors.As(err, &exitErr) {
return fmt.Errorf("kubeadm init failed (code %v) with: %s", exitErr.ExitCode(), exitErr.Stderr)
}
return fmt.Errorf("kubeadm init: %w", err)
}
return nil
}
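// SetupPodNetworkInput holds the parameters for setting up the pod network.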
type SetupPodNetworkInput struct {
CloudProvider string
NodeName string
FirstNodePodCIDR string
SubnetworkPodCIDR string
ProviderID string
}
// SetupPodNetwork sets up the cilium pod network.
func (k *KubernetesUtil) SetupPodNetwork(ctx context.Context, in SetupPodNetworkInput) error {
switch in.CloudProvider {
case "gcp":
return k.setupGCPPodNetwork(ctx, in.NodeName, in.FirstNodePodCIDR, in.SubnetworkPodCIDR)
case "azure":
return k.setupAzurePodNetwork(ctx, in.ProviderID, in.SubnetworkPodCIDR)
case "qemu":
return k.setupQemuPodNetwork(ctx, in.SubnetworkPodCIDR)
default:
return fmt.Errorf("unsupported cloud provider %q", in.CloudProvider)
}
}
func (k *KubernetesUtil) setupAzurePodNetwork(ctx context.Context, providerID, subnetworkPodCIDR string) error {
matches := providerIDRegex.FindStringSubmatch(providerID)
if len(matches) != 5 {
return fmt.Errorf("error splitting providerID %q", providerID)
}
ciliumInstall := exec.CommandContext(ctx, "cilium", "install", "--azure-resource-group", matches[2], "--ipam", "azure",
"--helm-set",
"tunnel=disabled,enableIPv4Masquerade=true,azure.enabled=true,debug.enabled=true,ipv4NativeRoutingCIDR="+subnetworkPodCIDR+
",endpointRoutes.enabled=true,encryption.enabled=true,encryption.type=wireguard,l7Proxy=false,egressMasqueradeInterfaces=eth0")
ciliumInstall.Env = append(os.Environ(), "KUBECONFIG="+kubeConfig)
out, err := ciliumInstall.CombinedOutput()
if err != nil {
err = errors.New(string(out))
return err
}
return nil
}
func (k *KubernetesUtil) setupGCPPodNetwork(ctx context.Context, nodeName, nodePodCIDR, subnetworkPodCIDR string) error {
out, err := exec.CommandContext(ctx, kubectlPath, "--kubeconfig", kubeConfig, "patch", "node", nodeName, "-p", "{\"spec\":{\"podCIDR\": \""+nodePodCIDR+"\"}}").CombinedOutput()
if err != nil {
err = errors.New(string(out))
return err
}
// allow coredns to run on uninitialized nodes (required by cloud-controller-manager)
err = exec.CommandContext(ctx, kubectlPath, "--kubeconfig", kubeConfig, "-n", "kube-system", "patch", "deployment", "coredns", "--type", "json", "-p", "[{\"op\":\"add\",\"path\":\"/spec/template/spec/tolerations/-\",\"value\":{\"key\":\"node.cloudprovider.kubernetes.io/uninitialized\",\"value\":\"true\",\"effect\":\"NoSchedule\"}},{\"op\":\"add\",\"path\":\"/spec/template/spec/nodeSelector\",\"value\":{\"node-role.kubernetes.io/control-plane\":\"\"}}]").Run()
if err != nil {
return err
}
ciliumInstall := exec.CommandContext(ctx, "cilium", "install", "--ipam", "kubernetes", "--ipv4-native-routing-cidr", subnetworkPodCIDR,
"--helm-set", "endpointRoutes.enabled=true,tunnel=disabled,encryption.enabled=true,encryption.type=wireguard,l7Proxy=false")
ciliumInstall.Env = append(os.Environ(), "KUBECONFIG="+kubeConfig)
out, err = ciliumInstall.CombinedOutput()
if err != nil {
err = errors.New(string(out))
return err
}
return nil
}
// FixCilium fixes https://github.com/cilium/cilium/issues/19958 but instead of a rollout restart of
// the cilium daemonset, it only restarts the local cilium pod.
func (k *KubernetesUtil) FixCilium(nodeNameK8s string) {
// wait for cilium pod to be healthy
for {
time.Sleep(5 * time.Second)
resp, err := http.Get("http://127.0.0.1:9876/healthz")
if err != nil {
fmt.Printf("waiting for local cilium daemonset pod not healthy: %v\n", err)
continue
}
resp.Body.Close()
if resp.StatusCode == 200 {
break
}
}
// get cilium pod name
out, err := exec.CommandContext(context.Background(), "/bin/bash", "-c", "/run/state/bin/crictl ps -o json | jq -r '.containers[] | select(.metadata.name == \"cilium-agent\") | .podSandboxId'").CombinedOutput()
if err != nil {
fmt.Printf("getting pod id failed: %v: %v\n", err, string(out))
return
}
outLines := strings.Split(string(out), "\n")
fmt.Println(outLines)
podID := outLines[len(outLines)-2]
// stop and delete pod
out, err = exec.CommandContext(context.Background(), "/run/state/bin/crictl", "stopp", podID).CombinedOutput()
if err != nil {
fmt.Printf("stopping cilium agent pod failed: %v: %v\n", err, string(out))
return
}
out, err = exec.CommandContext(context.Background(), "/run/state/bin/crictl", "rmp", podID).CombinedOutput()
if err != nil {
fmt.Printf("removing cilium agent pod failed: %v: %v\n", err, string(out))
}
}
func (k *KubernetesUtil) setupQemuPodNetwork(ctx context.Context, subnetworkPodCIDR string) error {
ciliumInstall := exec.CommandContext(ctx, "cilium", "install", "--encryption", "wireguard", "--helm-set", "ipam.operator.clusterPoolIPv4PodCIDRList="+subnetworkPodCIDR+",endpointRoutes.enabled=true")
ciliumInstall.Env = append(os.Environ(), "KUBECONFIG="+kubeConfig)
out, err := ciliumInstall.CombinedOutput()
if err != nil {
err = errors.New(string(out))
return err
}
return nil
}
// SetupAutoscaling deploys the k8s cluster autoscaler.
func (k *KubernetesUtil) SetupAutoscaling(kubectl Client, clusterAutoscalerConfiguration resources.Marshaler, secrets resources.Marshaler) error {
if err := kubectl.Apply(secrets, true); err != nil {
return fmt.Errorf("applying cluster-autoscaler Secrets: %w", err)
}
return kubectl.Apply(clusterAutoscalerConfiguration, true)
}
// SetupActivationService deploys the Constellation node activation service.
func (k *KubernetesUtil) SetupActivationService(kubectl Client, activationServiceConfiguration resources.Marshaler) error {
return kubectl.Apply(activationServiceConfiguration, true)
}
// SetupCloudControllerManager deploys the k8s cloud-controller-manager.
func (k *KubernetesUtil) SetupCloudControllerManager(kubectl Client, cloudControllerManagerConfiguration resources.Marshaler, configMaps resources.Marshaler, secrets resources.Marshaler) error {
if err := kubectl.Apply(configMaps, true); err != nil {
return fmt.Errorf("applying ccm ConfigMaps: %w", err)
}
if err := kubectl.Apply(secrets, true); err != nil {
return fmt.Errorf("applying ccm Secrets: %w", err)
}
if err := kubectl.Apply(cloudControllerManagerConfiguration, true); err != nil {
return fmt.Errorf("applying ccm: %w", err)
}
return nil
}
// SetupCloudNodeManager deploys the k8s cloud-node-manager.
func (k *KubernetesUtil) SetupCloudNodeManager(kubectl Client, cloudNodeManagerConfiguration resources.Marshaler) error {
return kubectl.Apply(cloudNodeManagerConfiguration, true)
}
// SetupAccessManager deploys the constellation-access-manager for deploying SSH keys on control-plane & worker nodes.
func (k *KubernetesUtil) SetupAccessManager(kubectl Client, accessManagerConfiguration resources.Marshaler) error {
return kubectl.Apply(accessManagerConfiguration, true)
}
// SetupKMS deploys the KMS deployment.
func (k *KubernetesUtil) SetupKMS(kubectl Client, kmsConfiguration resources.Marshaler) error {
if err := kubectl.Apply(kmsConfiguration, true); err != nil {
return fmt.Errorf("applying KMS configuration: %w", err)
}
return nil
}
// SetupVerificationService deploys the verification service.
func (k *KubernetesUtil) SetupVerificationService(kubectl Client, verificationServiceConfiguration resources.Marshaler) error {
return kubectl.Apply(verificationServiceConfiguration, true)
}
// JoinCluster joins an existing Kubernetes cluster using kubeadm join.
func (k *KubernetesUtil) JoinCluster(ctx context.Context, joinConfig []byte) error {
// TODO: audit policy should be user input
auditPolicy, err := resources.NewDefaultAuditPolicy().Marshal()
if err != nil {
return fmt.Errorf("generating default audit policy: %w", err)
}
if err := os.WriteFile(auditPolicyPath, auditPolicy, 0o644); err != nil {
return fmt.Errorf("writing default audit policy: %w", err)
}
joinConfigFile, err := os.CreateTemp("", "kubeadm-join.*.yaml")
if err != nil {
return fmt.Errorf("creating join config file %v: %w", joinConfigFile.Name(), err)
}
defer os.Remove(joinConfigFile.Name())
if _, err := joinConfigFile.Write(joinConfig); err != nil {
return fmt.Errorf("writing kubeadm init yaml config %v: %w", joinConfigFile.Name(), err)
}
// run `kubeadm join` to join a worker node to an existing Kubernetes cluster
cmd := exec.CommandContext(ctx, kubeadmPath, "join", "--config", joinConfigFile.Name())
if _, err := cmd.Output(); err != nil {
var exitErr *exec.ExitError
if errors.As(err, &exitErr) {
return fmt.Errorf("kubeadm join failed (code %v) with: %s", exitErr.ExitCode(), exitErr.Stderr)
}
return fmt.Errorf("kubeadm join: %w", err)
}
return nil
}
// StartKubelet enables and starts the kubelet systemd unit.
func (k *KubernetesUtil) StartKubelet() error {
ctx, cancel := context.WithTimeout(context.TODO(), kubeletStartTimeout)
defer cancel()
if err := enableSystemdUnit(ctx, kubeletServiceEtcPath); err != nil {
return fmt.Errorf("enabling kubelet systemd unit: %w", err)
}
return startSystemdUnit(ctx, "kubelet.service")
}
// RestartKubelet restarts a kubelet.
func (k *KubernetesUtil) RestartKubelet() error {
ctx, cancel := context.WithTimeout(context.TODO(), kubeletStartTimeout)
defer cancel()
return restartSystemdUnit(ctx, "kubelet.service")
}
// GetControlPlaneJoinCertificateKey returns the key that can be used together with the join args
// to join the cluster as a control-plane node.
func (k *KubernetesUtil) GetControlPlaneJoinCertificateKey(ctx context.Context) (string, error) {
// Key will be valid for 1h (no option to reduce the duration).
// https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-init-phase/#cmd-phase-upload-certs
output, err := exec.CommandContext(ctx, kubeadmPath, "init", "phase", "upload-certs", "--upload-certs").Output()
if err != nil {
var exitErr *exec.ExitError
if errors.As(err, &exitErr) {
return "", fmt.Errorf("kubeadm upload-certs failed (code %v) with: %s", exitErr.ExitCode(), exitErr.Stderr)
}
return "", fmt.Errorf("kubeadm upload-certs: %w", err)
}
// Example output:
/*
[upload-certs] Storing the certificates in ConfigMap "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
9555b74008f24687eb964bd90a164ecb5760a89481d9c55a77c129b7db438168
*/
key := kubernetesKeyRegexp.FindString(string(output))
if key == "" {
return "", fmt.Errorf("failed to parse kubeadm output: %s", string(output))
}
return key, nil
}
// CreateJoinToken creates a new bootstrap (join) token.
func (k *KubernetesUtil) CreateJoinToken(ctx context.Context, ttl time.Duration) (*kubeadm.BootstrapTokenDiscovery, error) {
output, err := exec.CommandContext(ctx, kubeadmPath, "token", "create", "--ttl", ttl.String(), "--print-join-command").Output()
if err != nil {
return nil, fmt.Errorf("kubeadm token create: %w", err)
}
// `kubeadm token create [...] --print-join-command` outputs the following format:
// kubeadm join [API_SERVER_ENDPOINT] --token [TOKEN] --discovery-token-ca-cert-hash [DISCOVERY_TOKEN_CA_CERT_HASH]
return ParseJoinCommand(string(output))
}
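// Illustrative sketch (exampleJoinMaterial is hypothetical, not part of the package API):
// minting the credentials a new control-plane node needs, using the two methods above.
func exampleJoinMaterial(ctx context.Context, k *KubernetesUtil) error {
token, err := k.CreateJoinToken(ctx, 15*time.Minute)
if err != nil {
return err
}
certKey, err := k.GetControlPlaneJoinCertificateKey(ctx)
if err != nil {
return err
}
fmt.Printf("join %s with token %s and certificate key %s\n", token.APIServerEndpoint, token.Token, certKey)
return nil
}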

View file

@ -0,0 +1,95 @@
package k8sapi
import (
"context"
"fmt"
"io/fs"
"github.com/icholy/replace"
"golang.org/x/text/transform"
)
const (
cniPluginsDir = "/opt/cni/bin"
binDir = "/run/state/bin"
kubeadmPath = "/run/state/bin/kubeadm"
kubeletPath = "/run/state/bin/kubelet"
kubectlPath = "/run/state/bin/kubectl"
kubeletServiceEtcPath = "/etc/systemd/system/kubelet.service"
kubeletServiceStatePath = "/run/state/systemd/system/kubelet.service"
kubeadmConfEtcPath = "/etc/systemd/system/kubelet.service.d/10-kubeadm.conf"
kubeadmConfStatePath = "/run/state/systemd/system/kubelet.service.d/10-kubeadm.conf"
executablePerm = 0o544
systemdUnitPerm = 0o644
)
// versionConfigs holds download URLs for all required kubernetes components for every supported version.
var versionConfigs = map[string]kubernetesVersion{
"1.23.6": {
CNIPluginsURL: "https://github.com/containernetworking/plugins/releases/download/v1.1.1/cni-plugins-linux-amd64-v1.1.1.tgz",
CrictlURL: "https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.24.1/crictl-v1.24.1-linux-amd64.tar.gz",
KubeletServiceURL: "https://raw.githubusercontent.com/kubernetes/release/v0.13.0/cmd/kubepkg/templates/latest/deb/kubelet/lib/systemd/system/kubelet.service",
KubeadmConfURL: "https://raw.githubusercontent.com/kubernetes/release/v0.13.0/cmd/kubepkg/templates/latest/deb/kubeadm/10-kubeadm.conf",
KubeletURL: "https://storage.googleapis.com/kubernetes-release/release/v1.23.6/bin/linux/amd64/kubelet",
KubeadmURL: "https://storage.googleapis.com/kubernetes-release/release/v1.23.6/bin/linux/amd64/kubeadm",
KubectlURL: "https://storage.googleapis.com/kubernetes-release/release/v1.23.6/bin/linux/amd64/kubectl",
},
}
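// kubernetesVersion bundles the download URLs of all components for a single Kubernetes version.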
type kubernetesVersion struct {
CNIPluginsURL string
CrictlURL string
KubeletServiceURL string
KubeadmConfURL string
KubeletURL string
KubeadmURL string
KubectlURL string
}
// installK8sComponents installs kubernetes components for this version.
// reference: https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#installing-kubeadm-kubelet-and-kubectl .
func (k *kubernetesVersion) installK8sComponents(ctx context.Context, inst installer) error {
if err := inst.Install(
ctx, k.CNIPluginsURL, []string{cniPluginsDir}, executablePerm, true,
); err != nil {
return fmt.Errorf("installing cni plugins: %w", err)
}
if err := inst.Install(
ctx, k.CrictlURL, []string{binDir}, executablePerm, true,
); err != nil {
return fmt.Errorf("installing crictl: %w", err)
}
if err := inst.Install(
ctx, k.KubeletServiceURL, []string{kubeletServiceEtcPath, kubeletServiceStatePath}, systemdUnitPerm, false, replace.String("/usr/bin", binDir),
); err != nil {
return fmt.Errorf("installing kubelet service: %w", err)
}
if err := inst.Install(
ctx, k.KubeadmConfURL, []string{kubeadmConfEtcPath, kubeadmConfStatePath}, systemdUnitPerm, false, replace.String("/usr/bin", binDir),
); err != nil {
return fmt.Errorf("installing kubeadm conf: %w", err)
}
if err := inst.Install(
ctx, k.KubeletURL, []string{kubeletPath}, executablePerm, false,
); err != nil {
return fmt.Errorf("installing kubelet: %w", err)
}
if err := inst.Install(
ctx, k.KubeadmURL, []string{kubeadmPath}, executablePerm, false,
); err != nil {
return fmt.Errorf("installing kubeadm: %w", err)
}
if err := inst.Install(
ctx, k.KubectlURL, []string{kubectlPath}, executablePerm, false,
); err != nil {
return fmt.Errorf("installing kubectl: %w", err)
}
return nil
}
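// installer downloads a component from sourceURL and installs it to the given destinations, optionally extracting archives and applying stream transformations.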
type installer interface {
Install(
ctx context.Context, sourceURL string, destinations []string, perm fs.FileMode,
extract bool, transforms ...transform.Transformer,
) error
}
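// Illustrative sketch (fakeInstaller is a hypothetical test double): any type with a
// matching Install method satisfies the installer interface; this one records the
// requested source URLs instead of downloading anything.
type fakeInstaller struct {
sourceURLs []string
}
func (f *fakeInstaller) Install(ctx context.Context, sourceURL string, destinations []string, perm fs.FileMode, extract bool, transforms ...transform.Transformer) error {
f.sourceURLs = append(f.sourceURLs, sourceURL)
return nil
}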

View file

@ -0,0 +1,29 @@
package kubernetes
import (
"context"
"time"
"github.com/edgelesssys/constellation/bootstrapper/internal/kubernetes/k8sapi"
"github.com/edgelesssys/constellation/bootstrapper/internal/kubernetes/k8sapi/resources"
kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
)
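// clusterUtil collects the low-level cluster operations KubeWrapper depends on (see stubClusterUtil in the tests for a minimal implementation).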
type clusterUtil interface {
InstallComponents(ctx context.Context, version string) error
InitCluster(ctx context.Context, initConfig []byte) error
JoinCluster(ctx context.Context, joinConfig []byte) error
SetupPodNetwork(context.Context, k8sapi.SetupPodNetworkInput) error
SetupAccessManager(kubectl k8sapi.Client, sshUsers resources.Marshaler) error
SetupAutoscaling(kubectl k8sapi.Client, clusterAutoscalerConfiguration resources.Marshaler, secrets resources.Marshaler) error
SetupActivationService(kubectl k8sapi.Client, activationServiceConfiguration resources.Marshaler) error
SetupCloudControllerManager(kubectl k8sapi.Client, cloudControllerManagerConfiguration resources.Marshaler, configMaps resources.Marshaler, secrets resources.Marshaler) error
SetupCloudNodeManager(kubectl k8sapi.Client, cloudNodeManagerConfiguration resources.Marshaler) error
SetupKMS(kubectl k8sapi.Client, kmsConfiguration resources.Marshaler) error
SetupVerificationService(kubectl k8sapi.Client, verificationServiceConfiguration resources.Marshaler) error
StartKubelet() error
RestartKubelet() error
GetControlPlaneJoinCertificateKey(ctx context.Context) (string, error)
CreateJoinToken(ctx context.Context, ttl time.Duration) (*kubeadm.BootstrapTokenDiscovery, error)
FixCilium(nodeNameK8s string)
}

View file

@ -0,0 +1,23 @@
package kubernetes
import (
"fmt"
"github.com/spf13/afero"
)
const kubeconfigPath = "/etc/kubernetes/admin.conf"
// KubeconfigReader reads the kubeconfig from disk; it implements the configReader interface.
type KubeconfigReader struct {
fs afero.Afero
}
// ReadKubeconfig reads the Kubeconfig from disk.
func (r KubeconfigReader) ReadKubeconfig() ([]byte, error) {
kubeconfig, err := r.fs.ReadFile(kubeconfigPath)
if err != nil {
return nil, fmt.Errorf("reading gce config: %w", err)
}
return kubeconfig, nil
}

View file

@ -0,0 +1,34 @@
package kubernetes
import (
"testing"
"github.com/spf13/afero"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestReadKubeconfig(t *testing.T) {
require := require.New(t)
assert := assert.New(t)
fs := afero.Afero{
Fs: afero.NewMemMapFs(),
}
require.NoError(fs.WriteFile(kubeconfigPath, []byte("someConfig"), 0o644))
reader := KubeconfigReader{fs}
config, err := reader.ReadKubeconfig()
require.NoError(err)
assert.Equal([]byte("someConfig"), config)
}
func TestReadKubeconfigFails(t *testing.T) {
assert := assert.New(t)
fs := afero.Afero{
Fs: afero.NewMemMapFs(),
}
reader := KubeconfigReader{fs}
_, err := reader.ReadKubeconfig()
assert.Error(err)
}

View file

@ -0,0 +1,369 @@
package kubernetes
import (
"context"
"encoding/json"
"fmt"
"strings"
"time"
"github.com/edgelesssys/constellation/bootstrapper/internal/kubernetes/k8sapi"
"github.com/edgelesssys/constellation/bootstrapper/internal/kubernetes/k8sapi/resources"
"github.com/edgelesssys/constellation/bootstrapper/role"
"github.com/edgelesssys/constellation/bootstrapper/util"
attestationtypes "github.com/edgelesssys/constellation/internal/attestation/types"
"github.com/edgelesssys/constellation/internal/cloud/metadata"
"github.com/spf13/afero"
kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
)
// configReader provides kubeconfig as []byte.
type configReader interface {
ReadKubeconfig() ([]byte, error)
}
// configurationProvider provides kubeadm init and join configuration.
type configurationProvider interface {
InitConfiguration(externalCloudProvider bool) k8sapi.KubeadmInitYAML
JoinConfiguration(externalCloudProvider bool) k8sapi.KubeadmJoinYAML
}
// KubeWrapper implements Cluster interface.
type KubeWrapper struct {
cloudProvider string
clusterUtil clusterUtil
configProvider configurationProvider
client k8sapi.Client
kubeconfigReader configReader
cloudControllerManager CloudControllerManager
cloudNodeManager CloudNodeManager
clusterAutoscaler ClusterAutoscaler
providerMetadata ProviderMetadata
initialMeasurementsJSON []byte
getIPAddr func() (string, error)
}
// New creates a new KubeWrapper with real values.
func New(cloudProvider string, clusterUtil clusterUtil, configProvider configurationProvider, client k8sapi.Client, cloudControllerManager CloudControllerManager,
cloudNodeManager CloudNodeManager, clusterAutoscaler ClusterAutoscaler, providerMetadata ProviderMetadata, initialMeasurementsJSON []byte,
) *KubeWrapper {
return &KubeWrapper{
cloudProvider: cloudProvider,
clusterUtil: clusterUtil,
configProvider: configProvider,
client: client,
kubeconfigReader: &KubeconfigReader{fs: afero.Afero{Fs: afero.NewOsFs()}},
cloudControllerManager: cloudControllerManager,
cloudNodeManager: cloudNodeManager,
clusterAutoscaler: clusterAutoscaler,
providerMetadata: providerMetadata,
initialMeasurementsJSON: initialMeasurementsJSON,
getIPAddr: util.GetIPAddr,
}
}
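// KMSConfig holds the configuration for the KMS deployment.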
type KMSConfig struct {
MasterSecret []byte
KMSURI string
StorageURI string
KeyEncryptionKeyID string
UseExistingKEK bool
}
// InitCluster initializes a new Kubernetes cluster and applies the pod network provider.
func (k *KubeWrapper) InitCluster(
ctx context.Context, autoscalingNodeGroups []string, cloudServiceAccountURI, k8sVersion string,
id attestationtypes.ID, kmsConfig KMSConfig, sshUsers map[string]string,
) ([]byte, error) {
// TODO: k8s version should be user input
if err := k.clusterUtil.InstallComponents(ctx, k8sVersion); err != nil {
return nil, err
}
ip, err := k.getIPAddr()
if err != nil {
return nil, err
}
nodeName := ip
var providerID string
var instance metadata.InstanceMetadata
var publicIP string
var nodePodCIDR string
var subnetworkPodCIDR string
// this is the IP in "kubeadm init --control-plane-endpoint=<IP/DNS>:<port>" hence the unfortunate name
var controlPlaneEndpointIP string
var nodeIP string
// Step 1: retrieve cloud metadata for Kubernetes configuration
if k.providerMetadata.Supported() {
instance, err = k.providerMetadata.Self(ctx)
if err != nil {
return nil, fmt.Errorf("retrieving own instance metadata failed: %w", err)
}
nodeName = k8sCompliantHostname(instance.Name)
providerID = instance.ProviderID
if len(instance.PrivateIPs) > 0 {
nodeIP = instance.PrivateIPs[0]
}
if len(instance.PublicIPs) > 0 {
publicIP = instance.PublicIPs[0]
}
if len(instance.AliasIPRanges) > 0 {
nodePodCIDR = instance.AliasIPRanges[0]
}
subnetworkPodCIDR, err = k.providerMetadata.GetSubnetworkCIDR(ctx)
if err != nil {
return nil, fmt.Errorf("retrieving subnetwork CIDR failed: %w", err)
}
controlPlaneEndpointIP = publicIP
if k.providerMetadata.SupportsLoadBalancer() {
controlPlaneEndpointIP, err = k.providerMetadata.GetLoadBalancerIP(ctx)
if err != nil {
return nil, fmt.Errorf("retrieving load balancer IP failed: %w", err)
}
}
}
// Step 2: configure kubeadm init config
initConfig := k.configProvider.InitConfiguration(k.cloudControllerManager.Supported())
initConfig.SetNodeIP(nodeIP)
initConfig.SetCertSANs([]string{publicIP, nodeIP})
initConfig.SetNodeName(nodeName)
initConfig.SetProviderID(providerID)
initConfig.SetControlPlaneEndpoint(controlPlaneEndpointIP)
initConfigYAML, err := initConfig.Marshal()
if err != nil {
return nil, fmt.Errorf("encoding kubeadm init configuration as YAML: %w", err)
}
if err := k.clusterUtil.InitCluster(ctx, initConfigYAML); err != nil {
return nil, fmt.Errorf("kubeadm init: %w", err)
}
kubeConfig, err := k.GetKubeconfig()
if err != nil {
return nil, fmt.Errorf("reading kubeconfig after cluster initialization: %w", err)
}
k.client.SetKubeconfig(kubeConfig)
// Step 3: configure & start kubernetes controllers
setupPodNetworkInput := k8sapi.SetupPodNetworkInput{
CloudProvider: k.cloudProvider,
NodeName: nodeName,
FirstNodePodCIDR: nodePodCIDR,
SubnetworkPodCIDR: subnetworkPodCIDR,
ProviderID: providerID,
}
if err = k.clusterUtil.SetupPodNetwork(ctx, setupPodNetworkInput); err != nil {
return nil, fmt.Errorf("setting up pod network: %w", err)
}
kms := resources.NewKMSDeployment(k.cloudProvider, kmsConfig.MasterSecret)
if err = k.clusterUtil.SetupKMS(k.client, kms); err != nil {
return nil, fmt.Errorf("setting up kms: %w", err)
}
if err := k.setupActivationService(k.cloudProvider, k.initialMeasurementsJSON, id); err != nil {
return nil, fmt.Errorf("setting up activation service failed: %w", err)
}
if err := k.setupCCM(ctx, subnetworkPodCIDR, cloudServiceAccountURI, instance); err != nil {
return nil, fmt.Errorf("setting up cloud controller manager: %w", err)
}
if err := k.setupCloudNodeManager(); err != nil {
return nil, fmt.Errorf("setting up cloud node manager: %w", err)
}
if err := k.setupClusterAutoscaler(instance, cloudServiceAccountURI, autoscalingNodeGroups); err != nil {
return nil, fmt.Errorf("setting up cluster autoscaler: %w", err)
}
accessManager := resources.NewAccessManagerDeployment(sshUsers)
if err := k.clusterUtil.SetupAccessManager(k.client, accessManager); err != nil {
return nil, fmt.Errorf("failed to setup access-manager: %w", err)
}
if err := k.clusterUtil.SetupVerificationService(
k.client, resources.NewVerificationDaemonSet(k.cloudProvider),
); err != nil {
return nil, fmt.Errorf("failed to setup verification service: %w", err)
}
go k.clusterUtil.FixCilium(nodeName)
return k.GetKubeconfig()
}
// JoinCluster joins an existing Kubernetes cluster.
func (k *KubeWrapper) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTokenDiscovery, certKey string, peerRole role.Role) error {
// TODO: k8s version should be user input
if err := k.clusterUtil.InstallComponents(ctx, "1.23.6"); err != nil {
return err
}
// Step 1: retrieve cloud metadata for Kubernetes configuration
nodeInternalIP, err := k.getIPAddr()
if err != nil {
return err
}
nodeName := nodeInternalIP
var providerID string
if k.providerMetadata.Supported() {
instance, err := k.providerMetadata.Self(ctx)
if err != nil {
return fmt.Errorf("retrieving own instance metadata failed: %w", err)
}
providerID = instance.ProviderID
nodeName = instance.Name
if len(instance.PrivateIPs) > 0 {
nodeInternalIP = instance.PrivateIPs[0]
}
}
nodeName = k8sCompliantHostname(nodeName)
// Step 2: configure kubeadm join config
joinConfig := k.configProvider.JoinConfiguration(k.cloudControllerManager.Supported())
joinConfig.SetApiServerEndpoint(args.APIServerEndpoint)
joinConfig.SetToken(args.Token)
joinConfig.AppendDiscoveryTokenCaCertHash(args.CACertHashes[0])
joinConfig.SetNodeIP(nodeInternalIP)
joinConfig.SetNodeName(nodeName)
joinConfig.SetProviderID(providerID)
if peerRole == role.ControlPlane {
joinConfig.SetControlPlane(nodeInternalIP, certKey)
}
joinConfigYAML, err := joinConfig.Marshal()
if err != nil {
return fmt.Errorf("encoding kubeadm join configuration as YAML: %w", err)
}
if err := k.clusterUtil.JoinCluster(ctx, joinConfigYAML); err != nil {
return fmt.Errorf("joining cluster: %v; %w ", string(joinConfigYAML), err)
}
go k.clusterUtil.FixCilium(nodeName)
return nil
}
// GetKubeconfig returns the current node's kubeconfig as stored on disk.
func (k *KubeWrapper) GetKubeconfig() ([]byte, error) {
kubeconf, err := k.kubeconfigReader.ReadKubeconfig()
if err != nil {
return nil, err
}
// Replace the cluster.Server endpoint (127.0.0.1:16443) in admin.conf with the first bootstrapper endpoint (10.118.0.1:6443).
// The kube-apiserver listens on 10.118.0.1:6443, while 127.0.0.1:16443 is the nginx high-availability balancer endpoint running locally on all nodes.
// Alternatively, one could start a local high-availability balancer instead.
return []byte(strings.ReplaceAll(string(kubeconf), "127.0.0.1:16443", "10.118.0.1:6443")), nil
}
// GetKubeadmCertificateKey returns the key needed to join the cluster as a control-plane node (must be executed on a control plane; errors otherwise).
func (k *KubeWrapper) GetKubeadmCertificateKey(ctx context.Context) (string, error) {
return k.clusterUtil.GetControlPlaneJoinCertificateKey(ctx)
}
// GetJoinToken returns a bootstrap (join) token.
func (k *KubeWrapper) GetJoinToken(ctx context.Context, ttl time.Duration) (*kubeadm.BootstrapTokenDiscovery, error) {
return k.clusterUtil.CreateJoinToken(ctx, ttl)
}
func (k *KubeWrapper) setupActivationService(csp string, measurementsJSON []byte, id attestationtypes.ID) error {
idJSON, err := json.Marshal(id)
if err != nil {
return err
}
activationConfiguration := resources.NewActivationDaemonset(csp, string(measurementsJSON), string(idJSON))
return k.clusterUtil.SetupActivationService(k.client, activationConfiguration)
}
func (k *KubeWrapper) setupCCM(ctx context.Context, subnetworkPodCIDR, cloudServiceAccountURI string, instance metadata.InstanceMetadata) error {
if !k.cloudControllerManager.Supported() {
return nil
}
ccmConfigMaps, err := k.cloudControllerManager.ConfigMaps(instance)
if err != nil {
return fmt.Errorf("defining ConfigMaps for CCM failed: %w", err)
}
ccmSecrets, err := k.cloudControllerManager.Secrets(ctx, instance.ProviderID, cloudServiceAccountURI)
if err != nil {
return fmt.Errorf("defining Secrets for CCM failed: %w", err)
}
cloudControllerManagerConfiguration := resources.NewDefaultCloudControllerManagerDeployment(
k.cloudControllerManager.Name(), k.cloudControllerManager.Image(), k.cloudControllerManager.Path(), subnetworkPodCIDR,
k.cloudControllerManager.ExtraArgs(), k.cloudControllerManager.Volumes(), k.cloudControllerManager.VolumeMounts(), k.cloudControllerManager.Env(),
)
if err := k.clusterUtil.SetupCloudControllerManager(k.client, cloudControllerManagerConfiguration, ccmConfigMaps, ccmSecrets); err != nil {
return fmt.Errorf("failed to setup cloud-controller-manager: %w", err)
}
return nil
}
func (k *KubeWrapper) setupCloudNodeManager() error {
if !k.cloudNodeManager.Supported() {
return nil
}
cloudNodeManagerConfiguration := resources.NewDefaultCloudNodeManagerDeployment(
k.cloudNodeManager.Image(), k.cloudNodeManager.Path(), k.cloudNodeManager.ExtraArgs(),
)
if err := k.clusterUtil.SetupCloudNodeManager(k.client, cloudNodeManagerConfiguration); err != nil {
return fmt.Errorf("failed to setup cloud-node-manager: %w", err)
}
return nil
}
func (k *KubeWrapper) setupClusterAutoscaler(instance metadata.InstanceMetadata, cloudServiceAccountURI string, autoscalingNodeGroups []string) error {
if !k.clusterAutoscaler.Supported() {
return nil
}
caSecrets, err := k.clusterAutoscaler.Secrets(instance.ProviderID, cloudServiceAccountURI)
if err != nil {
return fmt.Errorf("defining Secrets for cluster-autoscaler failed: %w", err)
}
clusterAutoscalerConfiguration := resources.NewDefaultAutoscalerDeployment(k.clusterAutoscaler.Volumes(), k.clusterAutoscaler.VolumeMounts(), k.clusterAutoscaler.Env())
clusterAutoscalerConfiguration.SetAutoscalerCommand(k.clusterAutoscaler.Name(), autoscalingNodeGroups)
if err := k.clusterUtil.SetupAutoscaling(k.client, clusterAutoscalerConfiguration, caSecrets); err != nil {
return fmt.Errorf("failed to setup cluster-autoscaler: %w", err)
}
return nil
}
// k8sCompliantHostname transforms a hostname to an RFC 1123 compliant, lowercase subdomain as required by Kubernetes node names.
// The following regex is used by k8s for validation: /^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$/ .
// Only a simple heuristic is used for now (to lowercase, replace underscores).
func k8sCompliantHostname(in string) string {
hostname := strings.ToLower(in)
hostname = strings.ReplaceAll(hostname, "_", "-")
return hostname
}
// StartKubelet starts the kubelet service.
func (k *KubeWrapper) StartKubelet() error {
return k.clusterUtil.StartKubelet()
}
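// fakeK8SClient is a no-op k8sapi.Client used for testing.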
type fakeK8SClient struct {
kubeconfig []byte
}
// NewFakeK8SClient creates a new, fake k8s client where apply always works.
func NewFakeK8SClient([]byte) (k8sapi.Client, error) {
return &fakeK8SClient{}, nil
}
// Apply fakes applying Kubernetes resources.
func (f *fakeK8SClient) Apply(resources resources.Marshaler, forceConflicts bool) error {
return nil
}
// SetKubeconfig stores the kubeconfig given to it.
func (f *fakeK8SClient) SetKubeconfig(kubeconfig []byte) {
f.kubeconfig = kubeconfig
}

View file

@ -0,0 +1,633 @@
package kubernetes
import (
"context"
"errors"
"regexp"
"testing"
"time"
"github.com/edgelesssys/constellation/bootstrapper/internal/kubernetes/k8sapi"
"github.com/edgelesssys/constellation/bootstrapper/internal/kubernetes/k8sapi/resources"
"github.com/edgelesssys/constellation/bootstrapper/role"
attestationtypes "github.com/edgelesssys/constellation/internal/attestation/types"
"github.com/edgelesssys/constellation/internal/cloud/metadata"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/goleak"
kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
)
func TestMain(m *testing.M) {
goleak.VerifyTestMain(m)
}
func TestInitCluster(t *testing.T) {
someErr := errors.New("failed")
serviceAccountURI := "some-service-account-uri"
masterSecret := []byte("some-master-secret")
autoscalingNodeGroups := []string{"0,10,autoscaling_group_0"}
nodeName := "node-name"
providerID := "provider-id"
privateIP := "192.0.2.1"
publicIP := "192.0.2.2"
loadbalancerIP := "192.0.2.3"
aliasIPRange := "192.0.2.0/24"
k8sVersion := "1.23.8"
testCases := map[string]struct {
clusterUtil stubClusterUtil
kubeCTL stubKubeCTL
providerMetadata ProviderMetadata
CloudControllerManager CloudControllerManager
CloudNodeManager CloudNodeManager
ClusterAutoscaler ClusterAutoscaler
kubeconfigReader configReader
wantConfig k8sapi.KubeadmInitYAML
wantErr bool
}{
"kubeadm init works without metadata": {
clusterUtil: stubClusterUtil{},
kubeconfigReader: &stubKubeconfigReader{
Kubeconfig: []byte("someKubeconfig"),
},
providerMetadata: &stubProviderMetadata{SupportedResp: false},
CloudControllerManager: &stubCloudControllerManager{},
CloudNodeManager: &stubCloudNodeManager{SupportedResp: false},
ClusterAutoscaler: &stubClusterAutoscaler{},
wantConfig: k8sapi.KubeadmInitYAML{
InitConfiguration: kubeadm.InitConfiguration{
NodeRegistration: kubeadm.NodeRegistrationOptions{
KubeletExtraArgs: map[string]string{
"node-ip": "",
"provider-id": "",
},
Name: privateIP,
},
},
ClusterConfiguration: kubeadm.ClusterConfiguration{},
},
},
"kubeadm init works with metadata and loadbalancer": {
clusterUtil: stubClusterUtil{},
kubeconfigReader: &stubKubeconfigReader{
Kubeconfig: []byte("someKubeconfig"),
},
providerMetadata: &stubProviderMetadata{
SupportedResp: true,
SelfResp: metadata.InstanceMetadata{
Name: nodeName,
ProviderID: providerID,
PrivateIPs: []string{privateIP},
PublicIPs: []string{publicIP},
AliasIPRanges: []string{aliasIPRange},
},
GetLoadBalancerIPResp: loadbalancerIP,
SupportsLoadBalancerResp: true,
},
CloudControllerManager: &stubCloudControllerManager{},
CloudNodeManager: &stubCloudNodeManager{SupportedResp: false},
ClusterAutoscaler: &stubClusterAutoscaler{},
wantConfig: k8sapi.KubeadmInitYAML{
InitConfiguration: kubeadm.InitConfiguration{
NodeRegistration: kubeadm.NodeRegistrationOptions{
KubeletExtraArgs: map[string]string{
"node-ip": privateIP,
"provider-id": providerID,
},
Name: nodeName,
},
},
ClusterConfiguration: kubeadm.ClusterConfiguration{
ControlPlaneEndpoint: loadbalancerIP,
APIServer: kubeadm.APIServer{
CertSANs: []string{publicIP, privateIP},
},
},
},
wantErr: false,
},
"kubeadm init fails when retrieving metadata self": {
clusterUtil: stubClusterUtil{},
kubeconfigReader: &stubKubeconfigReader{
Kubeconfig: []byte("someKubeconfig"),
},
providerMetadata: &stubProviderMetadata{
SelfErr: someErr,
SupportedResp: true,
},
CloudControllerManager: &stubCloudControllerManager{},
CloudNodeManager: &stubCloudNodeManager{},
ClusterAutoscaler: &stubClusterAutoscaler{},
wantErr: true,
},
"kubeadm init fails when retrieving metadata subnetwork cidr": {
clusterUtil: stubClusterUtil{},
kubeconfigReader: &stubKubeconfigReader{
Kubeconfig: []byte("someKubeconfig"),
},
providerMetadata: &stubProviderMetadata{
GetSubnetworkCIDRErr: someErr,
SupportedResp: true,
},
CloudControllerManager: &stubCloudControllerManager{},
CloudNodeManager: &stubCloudNodeManager{},
ClusterAutoscaler: &stubClusterAutoscaler{},
wantErr: true,
},
"kubeadm init fails when retrieving metadata loadbalancer ip": {
clusterUtil: stubClusterUtil{},
kubeconfigReader: &stubKubeconfigReader{
Kubeconfig: []byte("someKubeconfig"),
},
providerMetadata: &stubProviderMetadata{
GetLoadBalancerIPErr: someErr,
SupportsLoadBalancerResp: true,
SupportedResp: true,
},
CloudControllerManager: &stubCloudControllerManager{},
CloudNodeManager: &stubCloudNodeManager{},
ClusterAutoscaler: &stubClusterAutoscaler{},
wantErr: true,
},
"kubeadm init fails when applying the init config": {
clusterUtil: stubClusterUtil{initClusterErr: someErr},
kubeconfigReader: &stubKubeconfigReader{
Kubeconfig: []byte("someKubeconfig"),
},
providerMetadata: &stubProviderMetadata{},
CloudControllerManager: &stubCloudControllerManager{},
CloudNodeManager: &stubCloudNodeManager{},
ClusterAutoscaler: &stubClusterAutoscaler{},
wantErr: true,
},
"kubeadm init fails when setting up the pod network": {
clusterUtil: stubClusterUtil{setupPodNetworkErr: someErr},
kubeconfigReader: &stubKubeconfigReader{
Kubeconfig: []byte("someKubeconfig"),
},
providerMetadata: &stubProviderMetadata{},
CloudControllerManager: &stubCloudControllerManager{},
CloudNodeManager: &stubCloudNodeManager{},
ClusterAutoscaler: &stubClusterAutoscaler{},
wantErr: true,
},
"kubeadm init fails when setting up the activation service": {
clusterUtil: stubClusterUtil{setupActivationServiceError: someErr},
kubeconfigReader: &stubKubeconfigReader{
Kubeconfig: []byte("someKubeconfig"),
},
providerMetadata: &stubProviderMetadata{},
CloudControllerManager: &stubCloudControllerManager{SupportedResp: true},
CloudNodeManager: &stubCloudNodeManager{},
ClusterAutoscaler: &stubClusterAutoscaler{},
wantErr: true,
},
"kubeadm init fails when setting the cloud contoller manager": {
clusterUtil: stubClusterUtil{setupCloudControllerManagerError: someErr},
kubeconfigReader: &stubKubeconfigReader{
Kubeconfig: []byte("someKubeconfig"),
},
providerMetadata: &stubProviderMetadata{},
CloudControllerManager: &stubCloudControllerManager{SupportedResp: true},
CloudNodeManager: &stubCloudNodeManager{},
ClusterAutoscaler: &stubClusterAutoscaler{},
wantErr: true,
},
"kubeadm init fails when setting the cloud node manager": {
clusterUtil: stubClusterUtil{setupCloudNodeManagerError: someErr},
kubeconfigReader: &stubKubeconfigReader{
Kubeconfig: []byte("someKubeconfig"),
},
providerMetadata: &stubProviderMetadata{},
CloudControllerManager: &stubCloudControllerManager{},
CloudNodeManager: &stubCloudNodeManager{SupportedResp: true},
ClusterAutoscaler: &stubClusterAutoscaler{},
wantErr: true,
},
"kubeadm init fails when setting the cluster autoscaler": {
clusterUtil: stubClusterUtil{setupAutoscalingError: someErr},
kubeconfigReader: &stubKubeconfigReader{
Kubeconfig: []byte("someKubeconfig"),
},
providerMetadata: &stubProviderMetadata{},
CloudControllerManager: &stubCloudControllerManager{},
CloudNodeManager: &stubCloudNodeManager{},
ClusterAutoscaler: &stubClusterAutoscaler{SupportedResp: true},
wantErr: true,
},
"kubeadm init fails when reading kubeconfig": {
clusterUtil: stubClusterUtil{},
kubeconfigReader: &stubKubeconfigReader{
ReadErr: someErr,
},
providerMetadata: &stubProviderMetadata{},
CloudControllerManager: &stubCloudControllerManager{},
CloudNodeManager: &stubCloudNodeManager{},
ClusterAutoscaler: &stubClusterAutoscaler{},
wantErr: true,
},
"kubeadm init fails when setting up the kms": {
clusterUtil: stubClusterUtil{setupKMSError: someErr},
kubeconfigReader: &stubKubeconfigReader{
Kubeconfig: []byte("someKubeconfig"),
},
providerMetadata: &stubProviderMetadata{SupportedResp: false},
CloudControllerManager: &stubCloudControllerManager{},
CloudNodeManager: &stubCloudNodeManager{SupportedResp: false},
ClusterAutoscaler: &stubClusterAutoscaler{},
wantErr: true,
},
"kubeadm init fails when setting up verification service": {
clusterUtil: stubClusterUtil{setupVerificationServiceErr: someErr},
kubeconfigReader: &stubKubeconfigReader{
Kubeconfig: []byte("someKubeconfig"),
},
providerMetadata: &stubProviderMetadata{SupportedResp: false},
CloudControllerManager: &stubCloudControllerManager{},
CloudNodeManager: &stubCloudNodeManager{SupportedResp: false},
ClusterAutoscaler: &stubClusterAutoscaler{},
wantErr: true,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
kube := KubeWrapper{
clusterUtil: &tc.clusterUtil,
providerMetadata: tc.providerMetadata,
cloudControllerManager: tc.CloudControllerManager,
cloudNodeManager: tc.CloudNodeManager,
clusterAutoscaler: tc.ClusterAutoscaler,
configProvider: &stubConfigProvider{InitConfig: k8sapi.KubeadmInitYAML{}},
client: &tc.kubeCTL,
kubeconfigReader: tc.kubeconfigReader,
getIPAddr: func() (string, error) { return privateIP, nil },
}
_, err := kube.InitCluster(context.Background(), autoscalingNodeGroups, serviceAccountURI, k8sVersion, attestationtypes.ID{}, KMSConfig{MasterSecret: masterSecret}, nil)
if tc.wantErr {
assert.Error(err)
return
}
require.NoError(err)
var kubeadmConfig k8sapi.KubeadmInitYAML
require.NoError(resources.UnmarshalK8SResources(tc.clusterUtil.initConfigs[0], &kubeadmConfig))
require.Equal(tc.wantConfig.ClusterConfiguration, kubeadmConfig.ClusterConfiguration)
require.Equal(tc.wantConfig.InitConfiguration, kubeadmConfig.InitConfiguration)
})
}
}
func TestJoinCluster(t *testing.T) {
someErr := errors.New("failed")
joinCommand := &kubeadm.BootstrapTokenDiscovery{
APIServerEndpoint: "192.0.2.0:6443",
Token: "kube-fake-token",
CACertHashes: []string{"sha256:a60ebe9b0879090edd83b40a4df4bebb20506bac1e51d518ff8f4505a721930f"},
}
privateIP := "192.0.2.1"
certKey := "cert-key"
testCases := map[string]struct {
clusterUtil stubClusterUtil
providerMetadata ProviderMetadata
CloudControllerManager CloudControllerManager
wantConfig kubeadm.JoinConfiguration
role role.Role
wantErr bool
}{
"kubeadm join worker works without metadata": {
clusterUtil: stubClusterUtil{},
providerMetadata: &stubProviderMetadata{},
CloudControllerManager: &stubCloudControllerManager{},
role: role.Worker,
wantConfig: kubeadm.JoinConfiguration{
Discovery: kubeadm.Discovery{
BootstrapToken: joinCommand,
},
NodeRegistration: kubeadm.NodeRegistrationOptions{
Name: privateIP,
KubeletExtraArgs: map[string]string{"node-ip": privateIP},
},
},
},
"kubeadm join worker works with metadata": {
clusterUtil: stubClusterUtil{},
providerMetadata: &stubProviderMetadata{
SupportedResp: true,
SelfResp: metadata.InstanceMetadata{
ProviderID: "provider-id",
Name: "metadata-name",
PrivateIPs: []string{"192.0.2.1"},
},
},
CloudControllerManager: &stubCloudControllerManager{},
role: role.Worker,
wantConfig: kubeadm.JoinConfiguration{
Discovery: kubeadm.Discovery{
BootstrapToken: joinCommand,
},
NodeRegistration: kubeadm.NodeRegistrationOptions{
Name: "metadata-name",
KubeletExtraArgs: map[string]string{"node-ip": "192.0.2.1"},
},
},
},
"kubeadm join worker works with metadata and cloud controller manager": {
clusterUtil: stubClusterUtil{},
providerMetadata: &stubProviderMetadata{
SupportedResp: true,
SelfResp: metadata.InstanceMetadata{
ProviderID: "provider-id",
Name: "metadata-name",
PrivateIPs: []string{"192.0.2.1"},
},
},
CloudControllerManager: &stubCloudControllerManager{
SupportedResp: true,
},
role: role.Worker,
wantConfig: kubeadm.JoinConfiguration{
Discovery: kubeadm.Discovery{
BootstrapToken: joinCommand,
},
NodeRegistration: kubeadm.NodeRegistrationOptions{
Name: "metadata-name",
KubeletExtraArgs: map[string]string{"node-ip": "192.0.2.1"},
},
},
},
"kubeadm join control-plane node works with metadata": {
clusterUtil: stubClusterUtil{},
providerMetadata: &stubProviderMetadata{
SupportedResp: true,
SelfResp: metadata.InstanceMetadata{
ProviderID: "provider-id",
Name: "metadata-name",
PrivateIPs: []string{"192.0.2.1"},
},
},
CloudControllerManager: &stubCloudControllerManager{},
role: role.ControlPlane,
wantConfig: kubeadm.JoinConfiguration{
Discovery: kubeadm.Discovery{
BootstrapToken: joinCommand,
},
NodeRegistration: kubeadm.NodeRegistrationOptions{
Name: "metadata-name",
KubeletExtraArgs: map[string]string{"node-ip": "192.0.2.1"},
},
ControlPlane: &kubeadm.JoinControlPlane{
LocalAPIEndpoint: kubeadm.APIEndpoint{
AdvertiseAddress: "192.0.2.1",
BindPort: 6443,
},
CertificateKey: certKey,
},
},
},
"kubeadm join worker fails when retrieving self metadata": {
clusterUtil: stubClusterUtil{},
providerMetadata: &stubProviderMetadata{
SupportedResp: true,
SelfErr: someErr,
},
CloudControllerManager: &stubCloudControllerManager{},
role: role.Worker,
wantErr: true,
},
"kubeadm join worker fails when applying the join config": {
clusterUtil: stubClusterUtil{joinClusterErr: someErr},
providerMetadata: &stubProviderMetadata{},
CloudControllerManager: &stubCloudControllerManager{},
role: role.Worker,
wantErr: true,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
kube := KubeWrapper{
clusterUtil: &tc.clusterUtil,
providerMetadata: tc.providerMetadata,
cloudControllerManager: tc.CloudControllerManager,
configProvider: &stubConfigProvider{},
getIPAddr: func() (string, error) { return privateIP, nil },
}
err := kube.JoinCluster(context.Background(), joinCommand, certKey, tc.role)
if tc.wantErr {
assert.Error(err)
return
}
require.NoError(err)
var joinYaml k8sapi.KubeadmJoinYAML
joinYaml, err = joinYaml.Unmarshal(tc.clusterUtil.joinConfigs[0])
require.NoError(err)
assert.Equal(tc.wantConfig, joinYaml.JoinConfiguration)
})
}
}
func TestGetKubeconfig(t *testing.T) {
testCases := map[string]struct {
Kubewrapper KubeWrapper
wantErr bool
}{
"check single replacement": {
Kubewrapper: KubeWrapper{kubeconfigReader: &stubKubeconfigReader{
Kubeconfig: []byte("127.0.0.1:16443"),
}},
},
"check multiple replacement": {
Kubewrapper: KubeWrapper{kubeconfigReader: &stubKubeconfigReader{
Kubeconfig: []byte("127.0.0.1:16443...127.0.0.1:16443"),
}},
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
data, err := tc.Kubewrapper.GetKubeconfig()
require.NoError(err)
assert.NotContains(string(data), "127.0.0.1:16443")
assert.Contains(string(data), "10.118.0.1:6443")
})
}
}
func TestK8sCompliantHostname(t *testing.T) {
compliantHostname := regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`)
testCases := map[string]struct {
hostname string
wantHostname string
}{
"azure scale set names work": {
hostname: "constellation-scale-set-bootstrappers-name_0",
wantHostname: "constellation-scale-set-bootstrappers-name-0",
},
"compliant hostname is not modified": {
hostname: "abcd-123",
wantHostname: "abcd-123",
},
"uppercase hostnames are lowercased": {
hostname: "ABCD",
wantHostname: "abcd",
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
hostname := k8sCompliantHostname(tc.hostname)
assert.Equal(tc.wantHostname, hostname)
assert.Regexp(compliantHostname, hostname)
})
}
}
type stubClusterUtil struct {
installComponentsErr error
initClusterErr error
setupPodNetworkErr error
setupAutoscalingError error
setupActivationServiceError error
setupCloudControllerManagerError error
setupCloudNodeManagerError error
setupKMSError error
setupAccessManagerError error
setupVerificationServiceErr error
joinClusterErr error
startKubeletErr error
restartKubeletErr error
createJoinTokenResponse *kubeadm.BootstrapTokenDiscovery
createJoinTokenErr error
initConfigs [][]byte
joinConfigs [][]byte
}
func (s *stubClusterUtil) InstallComponents(ctx context.Context, version string) error {
return s.installComponentsErr
}
func (s *stubClusterUtil) InitCluster(ctx context.Context, initConfig []byte) error {
s.initConfigs = append(s.initConfigs, initConfig)
return s.initClusterErr
}
func (s *stubClusterUtil) SetupPodNetwork(context.Context, k8sapi.SetupPodNetworkInput) error {
return s.setupPodNetworkErr
}
func (s *stubClusterUtil) SetupAutoscaling(kubectl k8sapi.Client, clusterAutoscalerConfiguration resources.Marshaler, secrets resources.Marshaler) error {
return s.setupAutoscalingError
}
func (s *stubClusterUtil) SetupActivationService(kubectl k8sapi.Client, activationServiceConfiguration resources.Marshaler) error {
return s.setupActivationServiceError
}
func (s *stubClusterUtil) SetupCloudControllerManager(kubectl k8sapi.Client, cloudControllerManagerConfiguration resources.Marshaler, configMaps resources.Marshaler, secrets resources.Marshaler) error {
return s.setupCloudControllerManagerError
}
func (s *stubClusterUtil) SetupKMS(kubectl k8sapi.Client, kmsDeployment resources.Marshaler) error {
return s.setupKMSError
}
func (s *stubClusterUtil) SetupAccessManager(kubectl k8sapi.Client, accessManagerConfiguration resources.Marshaler) error {
return s.setupAccessManagerError
}
func (s *stubClusterUtil) SetupCloudNodeManager(kubectl k8sapi.Client, cloudNodeManagerConfiguration resources.Marshaler) error {
return s.setupCloudNodeManagerError
}
func (s *stubClusterUtil) SetupVerificationService(kubectl k8sapi.Client, verificationServiceConfiguration resources.Marshaler) error {
return s.setupVerificationServiceErr
}
func (s *stubClusterUtil) JoinCluster(ctx context.Context, joinConfig []byte) error {
s.joinConfigs = append(s.joinConfigs, joinConfig)
return s.joinClusterErr
}
func (s *stubClusterUtil) StartKubelet() error {
return s.startKubeletErr
}
func (s *stubClusterUtil) RestartKubelet() error {
return s.restartKubeletErr
}
func (s *stubClusterUtil) GetControlPlaneJoinCertificateKey(context.Context) (string, error) {
return "", nil
}
func (s *stubClusterUtil) CreateJoinToken(ctx context.Context, ttl time.Duration) (*kubeadm.BootstrapTokenDiscovery, error) {
return s.createJoinTokenResponse, s.createJoinTokenErr
}
func (s *stubClusterUtil) FixCilium(nodeName string) {
}
type stubConfigProvider struct {
InitConfig k8sapi.KubeadmInitYAML
JoinConfig k8sapi.KubeadmJoinYAML
}
func (s *stubConfigProvider) InitConfiguration(_ bool) k8sapi.KubeadmInitYAML {
return s.InitConfig
}
func (s *stubConfigProvider) JoinConfiguration(_ bool) k8sapi.KubeadmJoinYAML {
s.JoinConfig = k8sapi.KubeadmJoinYAML{
JoinConfiguration: kubeadm.JoinConfiguration{
Discovery: kubeadm.Discovery{
BootstrapToken: &kubeadm.BootstrapTokenDiscovery{},
},
},
}
return s.JoinConfig
}
type stubKubeCTL struct {
ApplyErr error
resources []resources.Marshaler
kubeconfigs [][]byte
}
func (s *stubKubeCTL) Apply(resources resources.Marshaler, forceConflicts bool) error {
s.resources = append(s.resources, resources)
return s.ApplyErr
}
func (s *stubKubeCTL) SetKubeconfig(kubeconfig []byte) {
s.kubeconfigs = append(s.kubeconfigs, kubeconfig)
}
type stubKubeconfigReader struct {
Kubeconfig []byte
ReadErr error
}
func (s *stubKubeconfigReader) ReadKubeconfig() ([]byte, error) {
return s.Kubeconfig, s.ReadErr
}