Co-authored-by: Malte Poll <mp@edgeless.systems>
Co-authored-by: katexochen <katexochen@users.noreply.github.com>
Co-authored-by: Daniel Weiße <dw@edgeless.systems>
Co-authored-by: Thomas Tendyck <tt@edgeless.systems>
Co-authored-by: Benedict Schlueter <bs@edgeless.systems>
Co-authored-by: leongross <leon.gross@rub.de>
Co-authored-by: Moritz Eckert <m1gh7ym0@gmail.com>
Leonard Cohnen 2022-03-22 16:03:15 +01:00
commit 2d8fcd9bf4
362 changed files with 50980 additions and 0 deletions

@@ -0,0 +1,48 @@
package k8sapi
import (
"flag"
"fmt"
"github.com/google/shlex"
kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
)
func ParseJoinCommand(joinCommand string) (*kubeadm.BootstrapTokenDiscovery, error) {
// Format:
// kubeadm join [API_SERVER_ENDPOINT] --token [TOKEN] --discovery-token-ca-cert-hash [DISCOVERY_TOKEN_CA_CERT_HASH] --control-plane
// split and verify that this is a kubeadm join command
argv, err := shlex.Split(joinCommand)
if err != nil {
return nil, fmt.Errorf("kubadm join command could not be tokenized: %v", joinCommand)
}
if len(argv) < 3 {
return nil, fmt.Errorf("kubadm join command is too short: %v", argv)
}
if argv[0] != "kubeadm" || argv[1] != "join" {
return nil, fmt.Errorf("not a kubeadm join command: %v", argv)
}
result := kubeadm.BootstrapTokenDiscovery{APIServerEndpoint: argv[2]}
var caCertHash string
// parse flags
flags := flag.NewFlagSet("", flag.ContinueOnError)
flags.StringVar(&result.Token, "token", "", "")
flags.StringVar(&caCertHash, "discovery-token-ca-cert-hash", "", "")
flags.Bool("control-plane", false, "")
if err := flags.Parse(argv[3:]); err != nil {
return nil, fmt.Errorf("parsing flag arguments failed: %v %w", argv, err)
}
if result.Token == "" {
return nil, fmt.Errorf("missing flag argument token: %v", argv)
}
if caCertHash == "" {
return nil, fmt.Errorf("missing flag argument discovery-token-ca-cert-hash: %v", argv)
}
result.CACertHashes = []string{caCertHash}
return &result, nil
}
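// Usage sketch (not part of this commit; values are illustrative): feed the
// output of `kubeadm token create --print-join-command` into the parser and
// read back the bootstrap-token discovery values. fmt is already imported above.
func exampleParseJoinCommand() {
	btd, err := ParseJoinCommand("kubeadm join 192.0.2.1:6443 " +
		"--token abcdef.0123456789abcdef " +
		"--discovery-token-ca-cert-hash sha256:0000000000000000000000000000000000000000000000000000000000000000")
	if err != nil {
		panic(err)
	}
	fmt.Println(btd.APIServerEndpoint, btd.Token, btd.CACertHashes[0])
}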

@@ -0,0 +1,69 @@
package k8sapi
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
)
func TestParseJoinCommand(t *testing.T) {
testCases := map[string]struct {
joinCommand string
expectedJoinArgs kubeadm.BootstrapTokenDiscovery
expectErr bool
}{
"join command can be parsed": {
joinCommand: "kubeadm join 192.0.2.0:8443 --token dummy-token --discovery-token-ca-cert-hash sha512:dummy-hash --control-plane",
expectedJoinArgs: kubeadm.BootstrapTokenDiscovery{
APIServerEndpoint: "192.0.2.0:8443",
Token: "dummy-token",
CACertHashes: []string{"sha512:dummy-hash"},
},
expectErr: false,
},
"incorrect join command returns error": {
joinCommand: "some string",
expectErr: true,
},
"missing api server endpoint is checked": {
joinCommand: "kubeadm join --token dummy-token --discovery-token-ca-cert-hash sha512:dummy-hash --control-plane",
expectErr: true,
},
"missing token is checked": {
joinCommand: "kubeadm join 192.0.2.0:8443 --discovery-token-ca-cert-hash sha512:dummy-hash --control-plane",
expectErr: true,
},
"missing discovery-token-ca-cert-hash is checked": {
joinCommand: "kubeadm join 192.0.2.0:8443 --token dummy-token --control-plane",
expectErr: true,
},
"missing control-plane": {
joinCommand: "kubeadm join 192.0.2.0:8443 --token dummy-token --discovery-token-ca-cert-hash sha512:dummy-hash",
expectedJoinArgs: kubeadm.BootstrapTokenDiscovery{
APIServerEndpoint: "192.0.2.0:8443",
Token: "dummy-token",
CACertHashes: []string{"sha512:dummy-hash"},
},
expectErr: false,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
joinArgs, err := ParseJoinCommand(tc.joinCommand)
if tc.expectErr {
assert.Error(err)
return
}
require.NoError(err)
assert.Equal(&tc.expectedJoinArgs, joinArgs)
})
}
}

@@ -0,0 +1,240 @@
package k8sapi
import (
"github.com/edgelesssys/constellation/coordinator/kubernetes/k8sapi/resources"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubeletconf "k8s.io/kubelet/config/v1beta1"
kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
)
// Uses types defined here: https://kubernetes.io/docs/reference/config-api/kubeadm-config.v1beta3/
// Slimmed down to the fields we require
const (
bindPort = 6443
)
type CoreOSConfiguration struct{}
func (c *CoreOSConfiguration) InitConfiguration() KubeadmInitYAML {
return KubeadmInitYAML{
InitConfiguration: kubeadm.InitConfiguration{
TypeMeta: v1.TypeMeta{
APIVersion: kubeadm.SchemeGroupVersion.String(),
Kind: "InitConfiguration",
},
NodeRegistration: kubeadm.NodeRegistrationOptions{
CRISocket: "/var/run/crio/crio.sock",
KubeletExtraArgs: map[string]string{
"cloud-provider": "external",
"network-plugin": "cni",
},
},
// AdvertiseAddress will be overwritten later
LocalAPIEndpoint: kubeadm.APIEndpoint{
BindPort: bindPort,
},
},
ClusterConfiguration: kubeadm.ClusterConfiguration{
TypeMeta: v1.TypeMeta{
Kind: "ClusterConfiguration",
APIVersion: kubeadm.SchemeGroupVersion.String(),
},
// necessary to make the kube-apiserver reachable through localhost
APIServer: kubeadm.APIServer{
CertSANs: []string{"127.0.0.1", "10.118.0.1"},
},
ControllerManager: kubeadm.ControlPlaneComponent{
ExtraArgs: map[string]string{
"flex-volume-plugin-dir": "/opt/libexec/kubernetes/kubelet-plugins/volume/exec/",
"cloud-provider": "external",
"configure-cloud-routes": "false",
},
},
ControlPlaneEndpoint: "127.0.0.1:16443",
},
// warning: this config is applied to every node in the cluster!
KubeletConfiguration: kubeletconf.KubeletConfiguration{
TypeMeta: v1.TypeMeta{
APIVersion: kubeletconf.SchemeGroupVersion.String(),
Kind: "KubeletConfiguration",
},
},
}
}
func (c *CoreOSConfiguration) JoinConfiguration() KubeadmJoinYAML {
return KubeadmJoinYAML{
JoinConfiguration: kubeadm.JoinConfiguration{
TypeMeta: v1.TypeMeta{
APIVersion: kubeadm.SchemeGroupVersion.String(),
Kind: "JoinConfiguration",
},
NodeRegistration: kubeadm.NodeRegistrationOptions{
CRISocket: "/var/run/crio/crio.sock",
KubeletExtraArgs: map[string]string{
"cloud-provider": "external",
},
},
Discovery: kubeadm.Discovery{
BootstrapToken: &kubeadm.BootstrapTokenDiscovery{},
},
},
KubeletConfiguration: kubeletconf.KubeletConfiguration{
TypeMeta: v1.TypeMeta{
APIVersion: kubeletconf.SchemeGroupVersion.String(),
Kind: "KubeletConfiguration",
},
},
}
}
type AWSConfiguration struct{}
func (a *AWSConfiguration) InitConfiguration() KubeadmInitYAML {
return KubeadmInitYAML{
InitConfiguration: kubeadm.InitConfiguration{
TypeMeta: v1.TypeMeta{
APIVersion: kubeadm.SchemeGroupVersion.String(),
Kind: "InitConfiguration",
},
NodeRegistration: kubeadm.NodeRegistrationOptions{
CRISocket: "/run/containerd/containerd.sock",
IgnorePreflightErrors: []string{"SystemVerification"},
},
LocalAPIEndpoint: kubeadm.APIEndpoint{BindPort: bindPort},
},
ClusterConfiguration: kubeadm.ClusterConfiguration{
TypeMeta: v1.TypeMeta{
APIVersion: kubeadm.SchemeGroupVersion.String(),
Kind: "ClusterConfiguration",
},
APIServer: kubeadm.APIServer{
CertSANs: []string{"10.118.0.1"},
},
},
KubeletConfiguration: kubeletconf.KubeletConfiguration{
TypeMeta: v1.TypeMeta{
APIVersion: kubeletconf.SchemeGroupVersion.String(),
Kind: "KubeletConfiguration",
},
},
}
}
func (a *AWSConfiguration) JoinConfiguration() KubeadmJoinYAML {
return KubeadmJoinYAML{
JoinConfiguration: kubeadm.JoinConfiguration{
TypeMeta: v1.TypeMeta{
APIVersion: kubeadm.SchemeGroupVersion.String(),
Kind: "JoinConfiguration",
},
NodeRegistration: kubeadm.NodeRegistrationOptions{
CRISocket: "/run/containerd/containerd.sock",
IgnorePreflightErrors: []string{"SystemVerification"},
},
Discovery: kubeadm.Discovery{
BootstrapToken: &kubeadm.BootstrapTokenDiscovery{},
},
},
KubeletConfiguration: kubeletconf.KubeletConfiguration{
TypeMeta: v1.TypeMeta{
APIVersion: kubeletconf.SchemeGroupVersion.String(),
Kind: "KubeletConfiguration",
},
},
}
}
type KubeadmJoinYAML struct {
JoinConfiguration kubeadm.JoinConfiguration
KubeletConfiguration kubeletconf.KubeletConfiguration
}
func (k *KubeadmJoinYAML) SetNodeName(nodeName string) {
k.JoinConfiguration.NodeRegistration.Name = nodeName
}
func (k *KubeadmJoinYAML) SetApiServerEndpoint(apiServerEndpoint string) {
k.JoinConfiguration.Discovery.BootstrapToken.APIServerEndpoint = apiServerEndpoint
}
func (k *KubeadmJoinYAML) SetToken(token string) {
k.JoinConfiguration.Discovery.BootstrapToken.Token = token
}
func (k *KubeadmJoinYAML) AppendDiscoveryTokenCaCertHash(discoveryTokenCaCertHash string) {
k.JoinConfiguration.Discovery.BootstrapToken.CACertHashes = append(k.JoinConfiguration.Discovery.BootstrapToken.CACertHashes, discoveryTokenCaCertHash)
}
func (k *KubeadmJoinYAML) SetNodeIP(nodeIP string) {
if k.JoinConfiguration.NodeRegistration.KubeletExtraArgs == nil {
k.JoinConfiguration.NodeRegistration.KubeletExtraArgs = map[string]string{"node-ip": nodeIP}
} else {
k.JoinConfiguration.NodeRegistration.KubeletExtraArgs["node-ip"] = nodeIP
}
}
func (k *KubeadmJoinYAML) SetProviderID(providerID string) {
k.KubeletConfiguration.ProviderID = providerID
}
func (k *KubeadmJoinYAML) Marshal() ([]byte, error) {
return resources.MarshalK8SResources(k)
}
func (k *KubeadmJoinYAML) Unmarshal(yamlData []byte) (KubeadmJoinYAML, error) {
var tmp KubeadmJoinYAML
return tmp, resources.UnmarshalK8SResources(yamlData, &tmp)
}
type KubeadmInitYAML struct {
InitConfiguration kubeadm.InitConfiguration
ClusterConfiguration kubeadm.ClusterConfiguration
KubeletConfiguration kubeletconf.KubeletConfiguration
}
func (k *KubeadmInitYAML) SetNodeName(nodeName string) {
k.InitConfiguration.NodeRegistration.Name = nodeName
}
func (k *KubeadmInitYAML) SetApiServerAdvertiseAddress(apiServerAdvertiseAddress string) {
k.InitConfiguration.LocalAPIEndpoint.AdvertiseAddress = apiServerAdvertiseAddress
}
func (k *KubeadmInitYAML) SetServiceCIDR(serviceCIDR string) {
k.ClusterConfiguration.Networking.ServiceSubnet = serviceCIDR
}
func (k *KubeadmInitYAML) SetPodNetworkCIDR(podNetworkCIDR string) {
k.ClusterConfiguration.Networking.PodSubnet = podNetworkCIDR
}
func (k *KubeadmInitYAML) SetServiceDNSDomain(serviceDNSDomain string) {
k.ClusterConfiguration.Networking.DNSDomain = serviceDNSDomain
}
func (k *KubeadmInitYAML) SetNodeIP(nodeIP string) {
if k.InitConfiguration.NodeRegistration.KubeletExtraArgs == nil {
k.InitConfiguration.NodeRegistration.KubeletExtraArgs = map[string]string{"node-ip": nodeIP}
} else {
k.InitConfiguration.NodeRegistration.KubeletExtraArgs["node-ip"] = nodeIP
}
}
func (k *KubeadmInitYAML) SetProviderID(providerID string) {
if k.InitConfiguration.NodeRegistration.KubeletExtraArgs == nil {
k.InitConfiguration.NodeRegistration.KubeletExtraArgs = map[string]string{"provider-id": providerID}
} else {
k.InitConfiguration.NodeRegistration.KubeletExtraArgs["provider-id"] = providerID
}
}
func (k *KubeadmInitYAML) Marshal() ([]byte, error) {
return resources.MarshalK8SResources(k)
}
func (k *KubeadmInitYAML) Unmarshal(yamlData []byte) (KubeadmInitYAML, error) {
var tmp KubeadmInitYAML
return tmp, resources.UnmarshalK8SResources(yamlData, &tmp)
}
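// Usage sketch (hypothetical helper, illustrative values): pick a provider
// configuration, set the instance-specific fields, and marshal the result into
// the multi-document YAML consumed by kubeadm.
func exampleInitConfigYAML() ([]byte, error) {
	cfg := (&AWSConfiguration{}).InitConfiguration()
	cfg.SetApiServerAdvertiseAddress("192.0.2.1")
	cfg.SetNodeName("control-plane-0")
	cfg.SetPodNetworkCIDR("10.244.0.0/16")
	cfg.SetServiceCIDR("10.245.0.0/24")
	return cfg.Marshal()
}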

@@ -0,0 +1,123 @@
package k8sapi
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/goleak"
)
func TestMain(m *testing.M) {
goleak.VerifyTestMain(m,
// https://github.com/kubernetes/klog/issues/282, https://github.com/kubernetes/klog/issues/188
goleak.IgnoreTopFunction("k8s.io/klog/v2.(*loggingT).flushDaemon"),
)
}
func TestInitConfiguration(t *testing.T) {
awsConfig := AWSConfiguration{}
coreOSConfig := CoreOSConfiguration{}
testCases := map[string]struct {
config KubeadmInitYAML
}{
"AWS init config can be created": {
config: awsConfig.InitConfiguration(),
},
"AWS init config with all fields can be created": {
config: func() KubeadmInitYAML {
c := awsConfig.InitConfiguration()
c.SetApiServerAdvertiseAddress("192.0.2.0")
c.SetNodeIP("192.0.2.0")
c.SetNodeName("node")
c.SetPodNetworkCIDR("10.244.0.0/16")
c.SetServiceCIDR("10.245.0.0/24")
c.SetProviderID("somecloudprovider://instance-id")
return c
}(),
},
"CoreOS init config can be created": {
config: coreOSConfig.InitConfiguration(),
},
"CoreOS init config with all fields can be created": {
config: func() KubeadmInitYAML {
c := coreOSConfig.InitConfiguration()
c.SetApiServerAdvertiseAddress("192.0.2.0")
c.SetNodeIP("192.0.2.0")
c.SetNodeName("node")
c.SetPodNetworkCIDR("10.244.0.0/16")
c.SetServiceCIDR("10.245.0.0/24")
c.SetProviderID("somecloudprovider://instance-id")
return c
}(),
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
config, err := tc.config.Marshal()
require.NoError(err)
tmp, err := tc.config.Unmarshal(config)
require.NoError(err)
// test for correct marshalling and unmarshalling
assert.Equal(tc.config.ClusterConfiguration, tmp.ClusterConfiguration)
assert.Equal(tc.config.InitConfiguration, tmp.InitConfiguration)
})
}
}
func TestJoinConfiguration(t *testing.T) {
awsConfig := AWSConfiguration{}
coreOSConfig := CoreOSConfiguration{}
testCases := map[string]struct {
config KubeadmJoinYAML
}{
"AWS join config can be created": {
config: awsConfig.JoinConfiguration(),
},
"AWS join config with all fields can be created": {
config: func() KubeadmJoinYAML {
c := awsConfig.JoinConfiguration()
c.SetApiServerEndpoint("192.0.2.0:6443")
c.SetNodeIP("192.0.2.0")
c.SetNodeName("node")
c.SetToken("token")
c.AppendDiscoveryTokenCaCertHash("discovery-token-ca-cert-hash")
c.SetProviderID("somecloudprovider://instance-id")
return c
}(),
},
"CoreOS join config can be created": {
config: coreOSConfig.JoinConfiguration(),
},
"CoreOS join config with all fields can be created": {
config: func() KubeadmJoinYAML {
c := coreOSConfig.JoinConfiguration()
c.SetApiServerEndpoint("192.0.2.0:6443")
c.SetNodeIP("192.0.2.0")
c.SetNodeName("node")
c.SetToken("token")
c.AppendDiscoveryTokenCaCertHash("discovery-token-ca-cert-hash")
c.SetProviderID("somecloudprovider://instance-id")
return c
}(),
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
config, err := tc.config.Marshal()
require.NoError(err)
tmp, err := tc.config.Unmarshal(config)
require.NoError(err)
// test for correct marshalling and unmarshalling
assert.Equal(tc.config.JoinConfiguration, tmp.JoinConfiguration)
})
}
}

@@ -0,0 +1,90 @@
package client
import (
"bytes"
"fmt"
"github.com/edgelesssys/constellation/coordinator/kubernetes/k8sapi/resources"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/cli-runtime/pkg/resource"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
)
const fieldManager = "constellation-coordinator"
// Client implements k8sapi.Client interface and talks to the kubernetes API.
type Client struct {
clientset kubernetes.Interface
builder *resource.Builder
}
// New creates a new Client, talking to the real k8s API.
func New(config []byte) (*Client, error) {
clientConfig, err := clientcmd.RESTConfigFromKubeConfig(config)
if err != nil {
return nil, fmt.Errorf("creating k8s client config from kubeconfig failed: %w", err)
}
clientset, err := kubernetes.NewForConfig(clientConfig)
if err != nil {
return nil, fmt.Errorf("creating k8s client from kubeconfig failed: %w", err)
}
restClientGetter, err := newRESTClientGetter(config)
if err != nil {
return nil, fmt.Errorf("creating k8s RESTClientGetter from kubeconfig failed: %w", err)
}
builder := resource.NewBuilder(restClientGetter).Unstructured()
return &Client{clientset: clientset, builder: builder}, nil
}
// ApplyOneObject uses server-side apply to send unstructured JSON blobs to the server and let it handle the core logic.
func (c *Client) ApplyOneObject(info *resource.Info, forceConflicts bool) error {
// helper can be used to patch k8s resources using server-side-apply.
helper := resource.NewHelper(info.Client, info.Mapping).
WithFieldManager(fieldManager)
// server-side-apply uses unstructured JSON instead of strict typing on the client side.
data, err := runtime.Encode(unstructured.UnstructuredJSONScheme, info.Object)
if err != nil {
return fmt.Errorf("preparing resource for server-side apply failed: encoding of resource failed: %w", err)
}
options := metav1.PatchOptions{
Force: &forceConflicts,
}
obj, err := helper.Patch(
info.Namespace,
info.Name,
types.ApplyPatchType,
data,
&options,
)
if err != nil {
return fmt.Errorf("failed to apply object %v using server-side apply: %w", info, err)
}
return info.Refresh(obj, true)
}
// GetObjects marshals the resources to YAML and converts them into []*resource.Info using a resource.Builder.
func (c *Client) GetObjects(resources resources.Marshaler) ([]*resource.Info, error) {
// convert our resource struct into YAML
data, err := resources.Marshal()
if err != nil {
return nil, fmt.Errorf("converting resources to YAML failed: %w", err)
}
// read into resource.Info using builder
reader := bytes.NewReader(data)
result := c.builder.
ContinueOnError().
NamespaceParam("default").
DefaultNamespace().
Stream(reader, "yaml").
Flatten().
Do()
return result.Infos()
}
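// Taken together, the two methods form the apply path: GetObjects turns
// marshaled YAML into resource.Info entries, and ApplyOneObject server-side-applies
// each entry. Hypothetical caller for illustration (the kubectl wrapper in this
// commit does the same):
func applyAll(c *Client, m resources.Marshaler) error {
	infos, err := c.GetObjects(m)
	if err != nil {
		return err
	}
	for _, info := range infos {
		if err := c.ApplyOneObject(info, true); err != nil {
			return err
		}
	}
	return nil
}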

@@ -0,0 +1,280 @@
package client
import (
"bytes"
"errors"
"io"
"net/http"
"testing"
"github.com/edgelesssys/constellation/coordinator/kubernetes/k8sapi/resources"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/protobuf/proto"
apps "k8s.io/api/apps/v1"
k8s "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/api/meta/testrestmapper"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer/json"
"k8s.io/cli-runtime/pkg/resource"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/kubernetes/scheme"
restfake "k8s.io/client-go/rest/fake"
"k8s.io/client-go/restmapper"
)
var (
corev1GV = schema.GroupVersion{Version: "v1"}
nginxDeployment = &apps.Deployment{
TypeMeta: v1.TypeMeta{
APIVersion: "apps/v1",
Kind: "Deployment",
},
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"app": "nginx",
},
Name: "my-nginx",
},
Spec: apps.DeploymentSpec{
Replicas: proto.Int32(3),
Selector: &v1.LabelSelector{
MatchLabels: map[string]string{
"app": "nginx",
},
},
Template: k8s.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"app": "nginx",
},
},
Spec: k8s.PodSpec{
Containers: []k8s.Container{
{
Name: "nginx",
Image: "nginx:1.14.2",
Ports: []k8s.ContainerPort{
{
ContainerPort: 80,
},
},
},
},
},
},
},
}
nginxDeplJSON, _ = marshalJSON(nginxDeployment)
nginxDeplYAML, _ = marshalYAML(nginxDeployment)
)
type unmarshableResource struct{}
func (*unmarshableResource) Marshal() ([]byte, error) {
return nil, errors.New("someErr")
}
func stringBody(body string) io.ReadCloser {
return io.NopCloser(bytes.NewReader([]byte(body)))
}
func fakeClientWith(t *testing.T, testName string, data map[string]string) resource.FakeClientFunc {
return func(version schema.GroupVersion) (resource.RESTClient, error) {
return &restfake.RESTClient{
GroupVersion: corev1GV,
NegotiatedSerializer: scheme.Codecs.WithoutConversion(),
Client: restfake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
p := req.URL.Path
q := req.URL.RawQuery
if len(q) != 0 {
p = p + "?" + q
}
body, ok := data[p]
if !ok {
t.Fatalf("%s: unexpected request: %s (%s)\n%#v", testName, p, req.URL, req)
}
header := http.Header{}
header.Set("Content-Type", runtime.ContentTypeJSON)
return &http.Response{
StatusCode: http.StatusOK,
Header: header,
Body: stringBody(body),
}, nil
}),
}, nil
}
}
func newClientWithFakes(t *testing.T, data map[string]string, objects ...runtime.Object) Client {
clientset := fake.NewSimpleClientset(objects...)
builder := resource.NewFakeBuilder(
fakeClientWith(t, "", data),
func() (meta.RESTMapper, error) {
return testrestmapper.TestOnlyStaticRESTMapper(scheme.Scheme), nil
},
func() (restmapper.CategoryExpander, error) {
return resource.FakeCategoryExpander, nil
}).
Unstructured()
client := Client{
clientset: clientset,
builder: builder,
}
return client
}
func failingClient() resource.FakeClientFunc {
return func(version schema.GroupVersion) (resource.RESTClient, error) {
return &restfake.RESTClient{
GroupVersion: corev1GV,
NegotiatedSerializer: scheme.Codecs.WithoutConversion(),
Resp: &http.Response{StatusCode: 501},
}, nil
}
}
func newFailingClient(objects ...runtime.Object) Client {
clientset := fake.NewSimpleClientset(objects...)
builder := resource.NewFakeBuilder(
failingClient(),
func() (meta.RESTMapper, error) {
return testrestmapper.TestOnlyStaticRESTMapper(scheme.Scheme), nil
},
func() (restmapper.CategoryExpander, error) {
return resource.FakeCategoryExpander, nil
}).
WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...)
client := Client{
clientset: clientset,
builder: builder,
}
return client
}
func marshalJSON(obj runtime.Object) ([]byte, error) {
serializer := json.NewSerializer(json.DefaultMetaFactory, nil, nil, false)
var buf bytes.Buffer
if err := serializer.Encode(obj, &buf); err != nil {
return nil, err
}
return buf.Bytes(), nil
}
func marshalYAML(obj runtime.Object) ([]byte, error) {
serializer := json.NewYAMLSerializer(json.DefaultMetaFactory, nil, nil)
var buf bytes.Buffer
if err := serializer.Encode(obj, &buf); err != nil {
return nil, err
}
return buf.Bytes(), nil
}
func TestApplyOneObject(t *testing.T) {
testCases := map[string]struct {
httpResponseData map[string]string
expectedObj runtime.Object
resourcesYAML string
failingClient bool
expectErr bool
}{
"apply works": {
httpResponseData: map[string]string{
"/deployments/my-nginx?fieldManager=constellation-coordinator&force=true": string(nginxDeplJSON),
},
expectedObj: nginxDeployment,
resourcesYAML: string(nginxDeplYAML),
expectErr: false,
},
"apply fails": {
httpResponseData: map[string]string{},
expectedObj: nginxDeployment,
resourcesYAML: string(nginxDeplYAML),
failingClient: true,
expectErr: true,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
var client Client
if tc.failingClient {
client = newFailingClient(tc.expectedObj)
} else {
client = newClientWithFakes(t, tc.httpResponseData, tc.expectedObj)
}
reader := bytes.NewReader([]byte(tc.resourcesYAML))
res := client.builder.
ContinueOnError().
Stream(reader, "yaml").
Flatten().
Do()
assert.NoError(res.Err())
infos, err := res.Infos()
assert.NoError(err)
require.Len(infos, 1)
err = client.ApplyOneObject(infos[0], true)
if tc.expectErr {
assert.Error(err)
return
}
require.NoError(err)
})
}
}
func TestGetObjects(t *testing.T) {
testCases := map[string]struct {
expectedResources resources.Marshaler
httpResponseData map[string]string
resourcesYAML string
expectErr bool
}{
"GetObjects works on flannel deployment": {
expectedResources: resources.NewDefaultFlannelDeployment(),
resourcesYAML: string(nginxDeplYAML),
expectErr: false,
},
"GetObjects works on cluster-autoscaler deployment": {
expectedResources: resources.NewDefaultFlannelDeployment(),
resourcesYAML: string(nginxDeplYAML),
expectErr: false,
},
"GetObjects works on cloud-controller-manager deployment": {
expectedResources: resources.NewDefaultCloudControllerManagerDeployment("someProvider", "someImage", "somePath"),
resourcesYAML: string(nginxDeplYAML),
expectErr: false,
},
"GetObjects Marshal failure detected": {
expectedResources: &unmarshableResource{},
resourcesYAML: string(nginxDeplYAML),
expectErr: true,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
client := newClientWithFakes(t, tc.httpResponseData)
infos, err := client.GetObjects(tc.expectedResources)
if tc.expectErr {
assert.Error(err)
return
}
require.NoError(err)
assert.NotNil(infos)
})
}
}

@@ -0,0 +1,64 @@
package client
import (
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/client-go/discovery"
"k8s.io/client-go/discovery/cached/memory"
"k8s.io/client-go/rest"
"k8s.io/client-go/restmapper"
"k8s.io/client-go/tools/clientcmd"
)
// restClientGetter implements k8s.io/cli-runtime/pkg/resource.RESTClientGetter.
type restClientGetter struct {
clientconfig clientcmd.ClientConfig
}
// newRESTClientGetter creates a new restClientGetter using a kubeconfig.
func newRESTClientGetter(kubeconfig []byte) (*restClientGetter, error) {
clientconfig, err := clientcmd.NewClientConfigFromBytes(kubeconfig)
if err != nil {
return nil, err
}
rawconfig, err := clientconfig.RawConfig()
if err != nil {
return nil, err
}
clientconfig = clientcmd.NewDefaultClientConfig(rawconfig, &clientcmd.ConfigOverrides{})
return &restClientGetter{clientconfig}, nil
}
// ToRESTConfig returns k8s REST client config.
func (r *restClientGetter) ToRESTConfig() (*rest.Config, error) {
return r.clientconfig.ClientConfig()
}
// ToDiscoveryClient creates new k8s discovery client from restClientGetter.
func (r *restClientGetter) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) {
restconfig, err := r.clientconfig.ClientConfig()
if err != nil {
return nil, err
}
dc, err := discovery.NewDiscoveryClientForConfig(restconfig)
if err != nil {
return nil, err
}
return memory.NewMemCacheClient(dc), nil
}
// ToRESTMapper creates new k8s RESTMapper from restClientGetter.
func (r *restClientGetter) ToRESTMapper() (meta.RESTMapper, error) {
dc, err := r.ToDiscoveryClient()
if err != nil {
return nil, err
}
return restmapper.NewDeferredDiscoveryRESTMapper(dc), nil
}
// ToRawKubeConfigLoader returns the inner k8s ClientConfig.
func (r *restClientGetter) ToRawKubeConfigLoader() clientcmd.ClientConfig {
return r.clientconfig
}
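// The getter exists so a resource.Builder can lazily discover API groups from
// the kubeconfig. Hypothetical wiring for illustration (mirrors client.New;
// assumes k8s.io/cli-runtime/pkg/resource is imported as in client.go):
func newUnstructuredBuilder(kubeconfig []byte) (*resource.Builder, error) {
	getter, err := newRESTClientGetter(kubeconfig)
	if err != nil {
		return nil, err
	}
	return resource.NewBuilder(getter).Unstructured(), nil
}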

@@ -0,0 +1,137 @@
package client
import (
"errors"
"testing"
"github.com/stretchr/testify/require"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)
const testingKubeconfig = `
apiVersion: v1
clusters:
- cluster:
certificate-authority-data: ""
server: https://192.0.2.0:6443
name: kubernetes
contexts:
- context:
cluster: kubernetes
user: kubernetes-admin
name: kubernetes-admin@kubernetes
current-context: kubernetes-admin@kubernetes
kind: Config
preferences: {}
users:
- name: kubernetes-admin
user:
client-certificate-data: ""
client-key-data: ""
`
type stubClientConfig struct {
RawConfigConfig clientcmdapi.Config
RawConfigErr error
ClientConfigConfig *restclient.Config
ClientConfigErr error
NamespaceString string
NamespaceOverridden bool
NamespaceErr error
ConfigAccessResult clientcmd.ConfigAccess
}
func (s *stubClientConfig) RawConfig() (clientcmdapi.Config, error) {
return s.RawConfigConfig, s.RawConfigErr
}
func (s *stubClientConfig) ClientConfig() (*restclient.Config, error) {
return s.ClientConfigConfig, s.ClientConfigErr
}
func (s *stubClientConfig) Namespace() (string, bool, error) {
return s.NamespaceString, s.NamespaceOverridden, s.NamespaceErr
}
func (s *stubClientConfig) ConfigAccess() clientcmd.ConfigAccess {
return s.ConfigAccessResult
}
func TestNewRESTClientGetter(t *testing.T) {
require := require.New(t)
result, err := newRESTClientGetter([]byte(testingKubeconfig))
require.NoError(err)
require.NotNil(result)
}
func TestToRESTConfig(t *testing.T) {
require := require.New(t)
getter := restClientGetter{
clientconfig: &stubClientConfig{
ClientConfigConfig: &restclient.Config{},
},
}
result, err := getter.ToRESTConfig()
require.NoError(err)
require.NotNil(result)
}
func TestToDiscoveryClient(t *testing.T) {
require := require.New(t)
getter := restClientGetter{
clientconfig: &stubClientConfig{
ClientConfigConfig: &restclient.Config{},
},
}
result, err := getter.ToDiscoveryClient()
require.NoError(err)
require.NotNil(result)
}
func TestToDiscoveryClientFail(t *testing.T) {
require := require.New(t)
getter := restClientGetter{
clientconfig: &stubClientConfig{
ClientConfigErr: errors.New("someErr"),
},
}
_, err := getter.ToDiscoveryClient()
require.Error(err)
}
func TestToRESTMapper(t *testing.T) {
require := require.New(t)
getter := restClientGetter{
clientconfig: &stubClientConfig{
ClientConfigConfig: &restclient.Config{},
},
}
result, err := getter.ToRESTMapper()
require.NoError(err)
require.NotNil(result)
}
func TestToRESTMapperFail(t *testing.T) {
require := require.New(t)
getter := restClientGetter{
clientconfig: &stubClientConfig{
ClientConfigErr: errors.New("someErr"),
},
}
_, err := getter.ToRESTMapper()
require.Error(err)
}
func TestToRawKubeConfigLoader(t *testing.T) {
clientConfig := stubClientConfig{
ClientConfigConfig: &restclient.Config{},
}
require := require.New(t)
getter := restClientGetter{
clientconfig: &clientConfig,
}
result := getter.ToRawKubeConfigLoader()
require.Equal(&clientConfig, result)
}

@@ -0,0 +1,11 @@
package kubectl
import "github.com/edgelesssys/constellation/coordinator/kubernetes/k8sapi/kubectl/client"
// generator implements clientGenerator interface.
type generator struct{}
// NewClient generates a new client implementing the Client interface.
func (generator) NewClient(kubeconfig []byte) (Client, error) {
return client.New(kubeconfig)
}

@@ -0,0 +1,68 @@
package kubectl
import (
"errors"
"fmt"
"github.com/edgelesssys/constellation/coordinator/kubernetes/k8sapi/resources"
"k8s.io/cli-runtime/pkg/resource"
)
// ErrKubeconfigNotSet is the error value returned by Kubectl.Apply when SetKubeconfig was not called first.
var ErrKubeconfigNotSet = errors.New("kubeconfig not set")
// Client wraps marshalable k8s resources into resource.Info fields and applies them in a cluster.
type Client interface {
// ApplyOneObject applies a k8s resource similar to kubectl apply.
ApplyOneObject(info *resource.Info, forceConflicts bool) error
// GetObjects converts resources into prepared info fields for use in ApplyOneObject.
GetObjects(resources resources.Marshaler) ([]*resource.Info, error)
}
// clientGenerator can generate new clients from a kubeconfig.
type clientGenerator interface {
NewClient(kubeconfig []byte) (Client, error)
}
// Kubectl implements kubernetes.Apply interface and acts like the kubernetes "kubectl" tool.
type Kubectl struct {
clientGenerator
kubeconfig []byte
}
// New creates a new kubectl using the real clientGenerator.
func New() *Kubectl {
return &Kubectl{
clientGenerator: &generator{},
}
}
// Apply will apply the given resources using server-side-apply.
func (k *Kubectl) Apply(resources resources.Marshaler, forceConflicts bool) error {
if k.kubeconfig == nil {
return ErrKubeconfigNotSet
}
client, err := k.clientGenerator.NewClient(k.kubeconfig)
if err != nil {
return err
}
// convert the Marshaler object into []*resource.Info
infos, err := client.GetObjects(resources)
if err != nil {
return err
}
// apply each object, one by one
for i, info := range infos {
if err := client.ApplyOneObject(info, forceConflicts); err != nil {
return fmt.Errorf("kubectl apply of object %v/%v failed: %w", i+1, len(infos), err)
}
}
return nil
}
// SetKubeconfig will store the kubeconfig to generate Clients using the clientGenerator later.
func (k *Kubectl) SetKubeconfig(kubeconfig []byte) {
k.kubeconfig = kubeconfig
}
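// End-to-end usage sketch (hypothetical helper): create a Kubectl, hand it a
// kubeconfig, and server-side-apply a resource set.
func exampleDeployFlannel(kubeconfig []byte) error {
	kube := New()
	kube.SetKubeconfig(kubeconfig)
	return kube.Apply(resources.NewDefaultFlannelDeployment(), true)
}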

@@ -0,0 +1,108 @@
package kubectl
import (
"errors"
"testing"
"github.com/edgelesssys/constellation/coordinator/kubernetes/k8sapi/resources"
"github.com/stretchr/testify/assert"
"k8s.io/cli-runtime/pkg/resource"
)
type stubClient struct {
applyOneObjectErr error
getObjectsInfos []*resource.Info
getObjectsErr error
}
func (s *stubClient) ApplyOneObject(info *resource.Info, forceConflicts bool) error {
return s.applyOneObjectErr
}
func (s *stubClient) GetObjects(resources resources.Marshaler) ([]*resource.Info, error) {
return s.getObjectsInfos, s.getObjectsErr
}
type stubClientGenerator struct {
applyOneObjectErr error
getObjectsInfos []*resource.Info
getObjectsErr error
newClientErr error
}
func (s *stubClientGenerator) NewClient(kubeconfig []byte) (Client, error) {
return &stubClient{
s.applyOneObjectErr,
s.getObjectsInfos,
s.getObjectsErr,
}, s.newClientErr
}
type dummyResource struct{}
func (*dummyResource) Marshal() ([]byte, error) {
panic("dummy")
}
func TestApplyWorks(t *testing.T) {
assert := assert.New(t)
kube := Kubectl{
clientGenerator: &stubClientGenerator{
getObjectsInfos: []*resource.Info{
{},
},
},
}
kube.SetKubeconfig([]byte("someConfig"))
assert.NoError(kube.Apply(&dummyResource{}, true))
}
func TestKubeconfigUnset(t *testing.T) {
assert := assert.New(t)
kube := Kubectl{}
assert.ErrorIs(kube.Apply(&dummyResource{}, true), ErrKubeconfigNotSet)
}
func TestClientGeneratorFails(t *testing.T) {
assert := assert.New(t)
err := errors.New("generator failed")
kube := Kubectl{
clientGenerator: &stubClientGenerator{
newClientErr: err,
},
}
kube.SetKubeconfig([]byte("someConfig"))
assert.ErrorIs(kube.Apply(&dummyResource{}, true), err)
}
func TestGetObjectsFails(t *testing.T) {
assert := assert.New(t)
err := errors.New("getObjects failed")
kube := Kubectl{
clientGenerator: &stubClientGenerator{
getObjectsErr: err,
},
}
kube.SetKubeconfig([]byte("someConfig"))
assert.ErrorIs(kube.Apply(&dummyResource{}, true), err)
}
func TestApplyOneObjectFails(t *testing.T) {
assert := assert.New(t)
err := errors.New("applyOneObject failed")
kube := Kubectl{
clientGenerator: &stubClientGenerator{
getObjectsInfos: []*resource.Info{
{},
},
applyOneObjectErr: err,
},
}
kube.SetKubeconfig([]byte("someConfig"))
assert.ErrorIs(kube.Apply(&dummyResource{}, true), err)
}

@@ -0,0 +1,169 @@
package resources
import (
"fmt"
apps "k8s.io/api/apps/v1"
k8s "k8s.io/api/core/v1"
rbac "k8s.io/api/rbac/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
)
type cloudControllerManagerDeployment struct {
ServiceAccount k8s.ServiceAccount
ClusterRoleBinding rbac.ClusterRoleBinding
DaemonSet apps.DaemonSet
}
// references:
// https://raw.githubusercontent.com/kubernetes/website/main/content/en/examples/admin/cloud/ccm-example.yaml
// https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/#cloud-controller-manager
func NewDefaultCloudControllerManagerDeployment(cloudProvider string, image string, path string) *cloudControllerManagerDeployment {
return &cloudControllerManagerDeployment{
ServiceAccount: k8s.ServiceAccount{
TypeMeta: meta.TypeMeta{
APIVersion: "v1",
Kind: "ServiceAccount",
},
ObjectMeta: meta.ObjectMeta{
Name: "cloud-controller-manager",
Namespace: "kube-system",
},
},
ClusterRoleBinding: rbac.ClusterRoleBinding{
TypeMeta: meta.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRoleBinding",
},
ObjectMeta: meta.ObjectMeta{
Name: "system:cloud-controller-manager",
},
RoleRef: rbac.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: "cluster-admin",
},
Subjects: []rbac.Subject{
{
Kind: "ServiceAccount",
Name: "cloud-controller-manager",
Namespace: "kube-system",
},
},
},
DaemonSet: apps.DaemonSet{
TypeMeta: meta.TypeMeta{
APIVersion: "apps/v1",
Kind: "DaemonSet",
},
ObjectMeta: meta.ObjectMeta{
Labels: map[string]string{
"k8s-app": "cloud-controller-manager",
},
Name: "cloud-controller-manager",
Namespace: "kube-system",
},
Spec: apps.DaemonSetSpec{
Selector: &meta.LabelSelector{
MatchLabels: map[string]string{
"k8s-app": "cloud-controller-manager",
},
},
Template: k8s.PodTemplateSpec{
ObjectMeta: meta.ObjectMeta{
Labels: map[string]string{
"k8s-app": "cloud-controller-manager",
},
},
Spec: k8s.PodSpec{
ServiceAccountName: "cloud-controller-manager",
Containers: []k8s.Container{
{
Name: "cloud-controller-manager",
Image: image,
Command: []string{
path,
fmt.Sprintf("--cloud-provider=%s", cloudProvider),
"--leader-elect=true",
"--allocate-node-cidrs=false",
"--configure-cloud-routes=false",
"--controllers=cloud-node,cloud-node-lifecycle",
"--use-service-account-credentials",
"--cluster-cidr=10.244.0.0/16",
"-v=2",
},
VolumeMounts: []k8s.VolumeMount{
{
MountPath: "/etc/kubernetes",
Name: "etckubernetes",
ReadOnly: true,
},
{
MountPath: "/etc/ssl",
Name: "etcssl",
ReadOnly: true,
},
{
MountPath: "/etc/pki",
Name: "etcpki",
ReadOnly: true,
},
{
MountPath: "/etc/gce.conf",
Name: "gceconf",
ReadOnly: true,
},
},
},
},
Volumes: []k8s.Volume{
{
Name: "etckubernetes",
VolumeSource: k8s.VolumeSource{
HostPath: &k8s.HostPathVolumeSource{Path: "/etc/kubernetes"},
},
},
{
Name: "etcssl",
VolumeSource: k8s.VolumeSource{
HostPath: &k8s.HostPathVolumeSource{Path: "/etc/ssl"},
},
},
{
Name: "etcpki",
VolumeSource: k8s.VolumeSource{
HostPath: &k8s.HostPathVolumeSource{Path: "/etc/pki"},
},
},
{
Name: "gceconf",
VolumeSource: k8s.VolumeSource{
HostPath: &k8s.HostPathVolumeSource{Path: "/etc/gce.conf"},
},
},
},
Tolerations: []k8s.Toleration{
{
Key: "node.cloudprovider.kubernetes.io/uninitialized",
Value: "true",
Effect: k8s.TaintEffectNoSchedule,
},
{
Key: "node-role.kubernetes.io/master",
Effect: k8s.TaintEffectNoSchedule,
},
},
NodeSelector: map[string]string{
"node-role.kubernetes.io/master": "",
},
},
},
},
},
}
}
func (c *cloudControllerManagerDeployment) Marshal() ([]byte, error) {
return MarshalK8SResources(c)
}
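// Constructor usage sketch (hypothetical helper; image and binary path are
// illustrative, not values shipped by this commit):
func exampleCloudControllerManagerYAML() ([]byte, error) {
	ccm := NewDefaultCloudControllerManagerDeployment(
		"gce", // becomes --cloud-provider=gce
		"k8s.gcr.io/cloud-controller-manager:v1.21.0", // container image
		"/usr/local/bin/cloud-controller-manager",     // command run in the container
	)
	return ccm.Marshal()
}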

@@ -0,0 +1,21 @@
package resources
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestCloudControllerMarshalUnmarshal(t *testing.T) {
require := require.New(t)
assert := assert.New(t)
cloudControllerManagerDepl := NewDefaultCloudControllerManagerDeployment("dummy-cloudprovider", "some-image:latest", "/dummy_path")
data, err := cloudControllerManagerDepl.Marshal()
require.NoError(err)
var recreated cloudControllerManagerDeployment
require.NoError(UnmarshalK8SResources(data, &recreated))
assert.Equal(cloudControllerManagerDepl, &recreated)
}

@@ -0,0 +1,496 @@
package resources
import (
"google.golang.org/protobuf/proto"
apps "k8s.io/api/apps/v1"
k8s "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1"
rbac "k8s.io/api/rbac/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
type autoscalerDeployment struct {
PodDisruptionBudget policy.PodDisruptionBudget
ServiceAccount k8s.ServiceAccount
ClusterRole rbac.ClusterRole
ClusterRoleBinding rbac.ClusterRoleBinding
Role rbac.Role
RoleBinding rbac.RoleBinding
Service k8s.Service
Deployment apps.Deployment
}
func NewDefaultAutoscalerDeployment() *autoscalerDeployment {
return &autoscalerDeployment{
PodDisruptionBudget: policy.PodDisruptionBudget{
TypeMeta: v1.TypeMeta{
APIVersion: "policy/v1",
Kind: "PodDisruptionBudget",
},
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "cluster-autoscaler",
"app.kubernetes.io/managed-by": "Constellation",
},
Name: "constellation-cluster-autoscaler",
Namespace: "default",
},
Spec: policy.PodDisruptionBudgetSpec{
Selector: &v1.LabelSelector{
MatchLabels: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "cluster-autoscaler",
},
},
MaxUnavailable: &intstr.IntOrString{
Type: intstr.Int,
IntVal: 1,
},
},
},
ServiceAccount: k8s.ServiceAccount{
TypeMeta: v1.TypeMeta{
APIVersion: "v1",
Kind: "ServiceAccount",
},
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "cluster-autoscaler",
"app.kubernetes.io/managed-by": "Constellation",
},
Name: "constellation-cluster-autoscaler",
Namespace: "kube-system",
},
AutomountServiceAccountToken: proto.Bool(true),
},
ClusterRole: rbac.ClusterRole{
TypeMeta: v1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRole",
},
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "cluster-autoscaler",
"app.kubernetes.io/managed-by": "Constellation",
},
Name: "constellation-cluster-autoscaler",
},
Rules: []rbac.PolicyRule{
{
APIGroups: []string{""},
Resources: []string{
"events",
"endpoints",
},
Verbs: []string{
"create",
"patch",
},
},
{
APIGroups: []string{""},
Resources: []string{
"pods/eviction",
},
Verbs: []string{
"create",
},
},
{
APIGroups: []string{""},
Resources: []string{
"pods/status",
},
Verbs: []string{
"update",
},
},
{
APIGroups: []string{""},
Resources: []string{
"endpoints",
},
ResourceNames: []string{
"cluster-autoscaler",
},
Verbs: []string{
"get",
"update",
},
},
{
APIGroups: []string{""},
Resources: []string{
"nodes",
},
Verbs: []string{
"watch",
"list",
"get",
"update",
},
},
{
APIGroups: []string{""},
Resources: []string{
"namespaces",
"pods",
"services",
"replicationcontrollers",
"persistentvolumeclaims",
"persistentvolumes",
},
Verbs: []string{
"watch",
"list",
"get",
},
},
{
APIGroups: []string{
"batch",
},
Resources: []string{
"jobs",
"cronjobs",
},
Verbs: []string{
"watch",
"list",
"get",
},
},
{
APIGroups: []string{
"batch",
"extensions",
},
Resources: []string{
"jobs",
},
Verbs: []string{
"get",
"list",
"patch",
"watch",
},
},
{
APIGroups: []string{
"extensions",
},
Resources: []string{
"replicasets",
"daemonsets",
},
Verbs: []string{
"watch",
"list",
"get",
},
},
{
APIGroups: []string{
"policy",
},
Resources: []string{
"poddisruptionbudgets",
},
Verbs: []string{
"watch",
"list",
},
},
{
APIGroups: []string{
"apps",
},
Resources: []string{
"daemonsets",
"replicasets",
"statefulsets",
},
Verbs: []string{
"watch",
"list",
"get",
},
},
{
APIGroups: []string{
"storage.k8s.io",
},
Resources: []string{
"storageclasses",
"csinodes",
"csidrivers",
"csistoragecapacities",
},
Verbs: []string{
"watch",
"list",
"get",
},
},
{
APIGroups: []string{""},
Resources: []string{
"configmaps",
},
Verbs: []string{
"list",
"watch",
},
},
{
APIGroups: []string{
"coordination.k8s.io",
},
Resources: []string{
"leases",
},
Verbs: []string{
"create",
},
},
{
APIGroups: []string{
"coordination.k8s.io",
},
ResourceNames: []string{
"cluster-autoscaler",
},
Resources: []string{
"leases",
},
Verbs: []string{
"get",
"update",
},
},
},
},
ClusterRoleBinding: rbac.ClusterRoleBinding{
TypeMeta: v1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRoleBinding",
},
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "cluster-autoscaler",
"app.kubernetes.io/managed-by": "Constellation",
},
Name: "constellation-cluster-autoscaler",
Namespace: "kube-system",
},
RoleRef: rbac.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: "constellation-cluster-autoscaler",
},
Subjects: []rbac.Subject{
{
Kind: "ServiceAccount",
Name: "constellation-cluster-autoscaler",
Namespace: "kube-system",
},
},
},
Role: rbac.Role{
TypeMeta: v1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "Role",
},
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "cluster-autoscaler",
"app.kubernetes.io/managed-by": "Constellation",
},
Name: "constellation-cluster-autoscaler",
Namespace: "kube-system",
},
Rules: []rbac.PolicyRule{
{
APIGroups: []string{""},
Resources: []string{
"configmaps",
},
Verbs: []string{
"create",
},
},
{
APIGroups: []string{""},
Resources: []string{
"configmaps",
},
ResourceNames: []string{
"cluster-autoscaler-status",
},
Verbs: []string{
"delete",
"get",
"update",
},
},
},
},
RoleBinding: rbac.RoleBinding{
TypeMeta: v1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "RoleBinding",
},
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "cluster-autoscaler",
"app.kubernetes.io/managed-by": "Constellation",
},
Name: "constellation-cluster-autoscaler",
Namespace: "kube-system",
},
RoleRef: rbac.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "Role",
Name: "constellation-cluster-autoscaler",
},
Subjects: []rbac.Subject{
{
Kind: "ServiceAccount",
Name: "constellation-cluster-autoscaler",
Namespace: "kube-system",
},
},
},
Service: k8s.Service{
TypeMeta: v1.TypeMeta{
APIVersion: "v1",
Kind: "Service",
},
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "cluster-autoscaler",
"app.kubernetes.io/managed-by": "Constellation",
},
Name: "constellation-cluster-autoscaler",
Namespace: "default",
},
Spec: k8s.ServiceSpec{
Ports: []k8s.ServicePort{
{
Port: 8085,
Protocol: k8s.ProtocolTCP,
TargetPort: intstr.FromInt(8085),
Name: "http",
},
},
Selector: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "cluster-autoscaler",
},
Type: k8s.ServiceTypeClusterIP,
},
},
Deployment: apps.Deployment{
TypeMeta: v1.TypeMeta{
APIVersion: "apps/v1",
Kind: "Deployment",
},
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "cluster-autoscaler",
"app.kubernetes.io/managed-by": "Constellation",
},
Name: "constellation-cluster-autoscaler",
Namespace: "kube-system",
},
Spec: apps.DeploymentSpec{
Replicas: proto.Int32(1),
Selector: &v1.LabelSelector{
MatchLabels: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "cluster-autoscaler",
},
},
Template: k8s.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "cluster-autoscaler",
},
},
Spec: k8s.PodSpec{
PriorityClassName: "system-cluster-critical",
DNSPolicy: k8s.DNSClusterFirst,
Containers: []k8s.Container{
{
Name: "cluster-autoscaler",
Image: "k8s.gcr.io/autoscaling/cluster-autoscaler:v1.21.1",
ImagePullPolicy: k8s.PullIfNotPresent,
LivenessProbe: &k8s.Probe{
ProbeHandler: k8s.ProbeHandler{
HTTPGet: &k8s.HTTPGetAction{
Path: "/health-check",
Port: intstr.FromInt(8085),
},
},
},
Ports: []k8s.ContainerPort{
{
ContainerPort: 8085,
},
},
},
},
ServiceAccountName: "constellation-cluster-autoscaler",
Tolerations: []k8s.Toleration{
{
Key: "node-role.kubernetes.io/master",
Operator: k8s.TolerationOpExists,
Effect: k8s.TaintEffectNoSchedule,
},
{
Key: "node.cloudprovider.kubernetes.io/uninitialized",
Operator: k8s.TolerationOpEqual,
Value: "true",
Effect: k8s.TaintEffectNoSchedule,
},
},
NodeSelector: map[string]string{
"node-role.kubernetes.io/control-plane": "",
},
},
},
},
},
}
}
func (a *autoscalerDeployment) Marshal() ([]byte, error) {
return MarshalK8SResources(a)
}
func (a *autoscalerDeployment) SetAutoscalerCommand(cloudprovider string, autoscalingNodeGroups []string) {
command := []string{
"./cluster-autoscaler",
"--cloud-provider",
cloudprovider,
"--logtostderr=true",
"--stderrthreshold=info",
"--v=2",
"--namespace=kube-system",
}
for _, autoscalingNodeGroup := range autoscalingNodeGroups {
command = append(command, "--nodes", autoscalingNodeGroup)
}
a.Deployment.Spec.Template.Spec.Containers[0].Command = command
}
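// Usage sketch (hypothetical helper): the node group entries are passed through
// to cluster-autoscaler's --nodes flag, which expects min:max:name specifiers;
// provider and group names here are illustrative.
func exampleAutoscalerYAML() ([]byte, error) {
	depl := NewDefaultAutoscalerDeployment()
	depl.SetAutoscalerCommand("gce", []string{
		"1:10:worker-group-a",
		"1:10:worker-group-b",
	})
	return depl.Marshal()
}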

@@ -0,0 +1,41 @@
package resources
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestAutoscalerDeploymentMarshalUnmarshal(t *testing.T) {
require := require.New(t)
assert := assert.New(t)
autoscalerDepl := NewDefaultAutoscalerDeployment()
data, err := autoscalerDepl.Marshal()
require.NoError(err)
t.Log(string(data))
var recreated autoscalerDeployment
require.NoError(UnmarshalK8SResources(data, &recreated))
assert.Equal(autoscalerDepl, &recreated)
}
func TestAutoscalerDeploymentWithCommandMarshalUnmarshal(t *testing.T) {
require := require.New(t)
assert := assert.New(t)
autoscalerDepl := NewDefaultAutoscalerDeployment()
autoscalerDepl.SetAutoscalerCommand("someProvider", []string{"group1", "group2"})
data, err := autoscalerDepl.Marshal()
require.NoError(err)
t.Log(string(data))
var recreated autoscalerDeployment
require.NoError(UnmarshalK8SResources(data, &recreated))
assert.Equal(autoscalerDepl, &recreated)
}

@@ -0,0 +1,339 @@
package resources
import (
"google.golang.org/protobuf/proto"
apps "k8s.io/api/apps/v1"
k8s "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1beta1"
rbac "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/api/resource"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
cniConfJSON = `{"name":"cbr0","cniVersion":"0.3.1","plugins":[{"type":"flannel","delegate":{"hairpinMode":true,"isDefaultGateway":true}},{"type":"portmap","capabilities":{"portMappings":true}}]}`
netConfJSON = `{"Network":"10.244.0.0/16","Backend":{"Type":"vxlan"}}`
)
// Reference: https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml
// Changes compared to the reference: added the wireguard interface "wg0" to the args of the "kube-flannel" container of the DaemonSet.
type FlannelDeployment struct {
PodSecurityPolicy policy.PodSecurityPolicy
ClusterRole rbac.ClusterRole
ClusterRoleBinding rbac.ClusterRoleBinding
ServiceAccount k8s.ServiceAccount
ConfigMap k8s.ConfigMap
DaemonSet apps.DaemonSet
}
func NewDefaultFlannelDeployment() *FlannelDeployment {
return &FlannelDeployment{
PodSecurityPolicy: policy.PodSecurityPolicy{
TypeMeta: v1.TypeMeta{
APIVersion: "policy/v1beta1",
Kind: "PodSecurityPolicy",
},
ObjectMeta: v1.ObjectMeta{
Name: "psp.flannel.unprivileged",
Annotations: map[string]string{
"seccomp.security.alpha.kubernetes.io/allowedProfileNames": "docker/default",
"seccomp.security.alpha.kubernetes.io/defaultProfileName": "docker/default",
"apparmor.security.beta.kubernetes.io/allowedProfileNames": "runtime/default",
"apparmor.security.beta.kubernetes.io/defaultProfileName": "runtime/default",
},
},
Spec: policy.PodSecurityPolicySpec{
Privileged: false,
Volumes: []policy.FSType{
policy.FSType("configMap"),
policy.FSType("secret"),
policy.FSType("emptyDir"),
policy.FSType("hostPath"),
},
AllowedHostPaths: []policy.AllowedHostPath{
{PathPrefix: "/etc/cni/net.d"},
{PathPrefix: "/etc/kube-flannel"},
{PathPrefix: "/run/flannel"},
},
ReadOnlyRootFilesystem: false,
RunAsUser: policy.RunAsUserStrategyOptions{
Rule: policy.RunAsUserStrategyRunAsAny,
},
SupplementalGroups: policy.SupplementalGroupsStrategyOptions{
Rule: policy.SupplementalGroupsStrategyRunAsAny,
},
FSGroup: policy.FSGroupStrategyOptions{
Rule: policy.FSGroupStrategyRunAsAny,
},
AllowPrivilegeEscalation: proto.Bool(false),
DefaultAllowPrivilegeEscalation: proto.Bool(false),
AllowedCapabilities: []k8s.Capability{
k8s.Capability("NET_ADMIN"),
k8s.Capability("NET_RAW"),
},
HostPID: false,
HostIPC: false,
HostNetwork: true,
HostPorts: []policy.HostPortRange{
{Min: 0, Max: 65535},
},
SELinux: policy.SELinuxStrategyOptions{
Rule: policy.SELinuxStrategyRunAsAny,
},
},
},
ClusterRole: rbac.ClusterRole{
TypeMeta: v1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRole",
},
ObjectMeta: v1.ObjectMeta{
Name: "flannel",
},
Rules: []rbac.PolicyRule{
{
APIGroups: []string{"extensions"},
Resources: []string{"podsecuritypolicies"},
Verbs: []string{"use"},
ResourceNames: []string{"psp.flannel.unprivileged"},
},
{
APIGroups: []string{""},
Resources: []string{"pods"},
Verbs: []string{"get"},
},
{
APIGroups: []string{""},
Resources: []string{"nodes"},
Verbs: []string{"list", "watch"},
},
{
APIGroups: []string{""},
Resources: []string{"nodes/status"},
Verbs: []string{"patch"},
},
},
},
ClusterRoleBinding: rbac.ClusterRoleBinding{
TypeMeta: v1.TypeMeta{
Kind: "ClusterRoleBinding",
APIVersion: "rbac.authorization.k8s.io/v1",
},
ObjectMeta: v1.ObjectMeta{
Name: "flannel",
},
RoleRef: rbac.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: "flannel",
},
Subjects: []rbac.Subject{
{
Kind: "ServiceAccount",
Name: "flannel",
Namespace: "kube-system",
},
},
},
ServiceAccount: k8s.ServiceAccount{
TypeMeta: v1.TypeMeta{
APIVersion: "v1",
Kind: "ServiceAccount",
},
ObjectMeta: v1.ObjectMeta{
Name: "flannel",
Namespace: "kube-system",
},
},
ConfigMap: k8s.ConfigMap{
TypeMeta: v1.TypeMeta{
Kind: "ConfigMap",
APIVersion: "v1",
},
ObjectMeta: v1.ObjectMeta{
Name: "kube-flannel-cfg",
Namespace: "kube-system",
Labels: map[string]string{
"tier": "node",
"app": "flannel",
},
},
Data: map[string]string{
"cni-conf.json": cniConfJSON,
"net-conf.json": netConfJSON,
},
},
DaemonSet: apps.DaemonSet{
TypeMeta: v1.TypeMeta{
APIVersion: "apps/v1",
Kind: "DaemonSet",
},
ObjectMeta: v1.ObjectMeta{
Name: "kube-flannel-ds",
Namespace: "kube-system",
Labels: map[string]string{
"tier": "node",
"app": "flannel",
},
},
Spec: apps.DaemonSetSpec{
Selector: &v1.LabelSelector{
MatchLabels: map[string]string{"app": "flannel"},
},
Template: k8s.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"tier": "node",
"app": "flannel",
},
},
Spec: k8s.PodSpec{
Affinity: &k8s.Affinity{
NodeAffinity: &k8s.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &k8s.NodeSelector{
NodeSelectorTerms: []k8s.NodeSelectorTerm{
{MatchExpressions: []k8s.NodeSelectorRequirement{
{
Key: "kubernetes.io/os",
Operator: k8s.NodeSelectorOpIn,
Values: []string{"linux"},
},
}},
},
},
},
},
HostNetwork: true,
PriorityClassName: "system-node-critical",
Tolerations: []k8s.Toleration{
{
Operator: k8s.TolerationOpExists,
Effect: k8s.TaintEffectNoSchedule,
},
},
ServiceAccountName: "flannel",
InitContainers: []k8s.Container{
{
Name: "install-cni-plugin",
Image: "rancher/mirrored-flannelcni-flannel-cni-plugin:v1.0.0",
Command: []string{"cp"},
Args: []string{"-f", "/flannel", "/opt/cni/bin/flannel"},
VolumeMounts: []k8s.VolumeMount{
{
Name: "cni-plugin",
MountPath: "/opt/cni/bin",
},
},
},
{
Name: "install-cni",
Image: "quay.io/coreos/flannel:v0.15.1",
Command: []string{"cp"},
Args: []string{"-f", "/etc/kube-flannel/cni-conf.json", "/etc/cni/net.d/10-flannel.conflist"},
VolumeMounts: []k8s.VolumeMount{
{
Name: "cni",
MountPath: "/etc/cni/net.d",
},
{
Name: "flannel-cfg",
MountPath: "/etc/kube-flannel/",
},
},
},
},
Containers: []k8s.Container{
{
Name: "kube-flannel",
Image: "quay.io/coreos/flannel:v0.15.1",
Command: []string{"/opt/bin/flanneld"},
Args: []string{"--ip-masq", "--kube-subnet-mgr", "--iface", "wg0"},
Resources: k8s.ResourceRequirements{
Requests: k8s.ResourceList{
"cpu": resource.MustParse("100m"),
"memory": resource.MustParse("50Mi"),
},
Limits: k8s.ResourceList{
"cpu": resource.MustParse("100m"),
"memory": resource.MustParse("50Mi"),
},
},
SecurityContext: &k8s.SecurityContext{
Privileged: proto.Bool(false),
Capabilities: &k8s.Capabilities{
Add: []k8s.Capability{k8s.Capability("NET_ADMIN"), k8s.Capability("NET_RAW")},
},
},
Env: []k8s.EnvVar{
{
Name: "POD_NAME",
ValueFrom: &k8s.EnvVarSource{
FieldRef: &k8s.ObjectFieldSelector{FieldPath: "metadata.name"},
},
},
{
Name: "POD_NAMESPACE",
ValueFrom: &k8s.EnvVarSource{
FieldRef: &k8s.ObjectFieldSelector{FieldPath: "metadata.namespace"},
},
},
},
VolumeMounts: []k8s.VolumeMount{
{
Name: "run",
MountPath: "/run/flannel",
},
{
Name: "flannel-cfg",
MountPath: "/etc/kube-flannel/",
},
},
},
},
Volumes: []k8s.Volume{
{
Name: "run",
VolumeSource: k8s.VolumeSource{
HostPath: &k8s.HostPathVolumeSource{
Path: "/run/flannel",
},
},
},
{
Name: "cni-plugin",
VolumeSource: k8s.VolumeSource{
HostPath: &k8s.HostPathVolumeSource{
Path: "/opt/cni/bin",
},
},
},
{
Name: "cni",
VolumeSource: k8s.VolumeSource{
HostPath: &k8s.HostPathVolumeSource{
Path: "/etc/cni/net.d",
},
},
},
{
Name: "flannel-cfg",
VolumeSource: k8s.VolumeSource{
ConfigMap: &k8s.ConfigMapVolumeSource{
LocalObjectReference: k8s.LocalObjectReference{
Name: "kube-flannel-cfg",
},
},
},
},
},
},
},
},
},
}
}
func (f *FlannelDeployment) Marshal() ([]byte, error) {
return MarshalK8SResources(f)
}

@@ -0,0 +1,21 @@
package resources
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestFlannelDeployment(t *testing.T) {
require := require.New(t)
assert := assert.New(t)
flannelDeployment := NewDefaultFlannelDeployment()
data, err := flannelDeployment.Marshal()
require.NoError(err)
var recreated FlannelDeployment
require.NoError(UnmarshalK8SResources(data, &recreated))
assert.Equal(flannelDeployment, &recreated)
}

@@ -0,0 +1,124 @@
package resources
import (
"bytes"
"errors"
"fmt"
"io"
"reflect"
"gopkg.in/yaml.v3"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/runtime/serializer/json"
"k8s.io/client-go/kubernetes/scheme"
)
// Marshaler is used by all k8s resources that can be marshaled to YAML.
type Marshaler interface {
Marshal() ([]byte, error)
}
// MarshalK8SResources marshals every field of a struct into a k8s resource YAML.
func MarshalK8SResources(resources interface{}) ([]byte, error) {
serializer := json.NewYAMLSerializer(json.DefaultMetaFactory, nil, nil)
var buf bytes.Buffer
// reflect over struct containing fields that are k8s resources
value := reflect.ValueOf(resources)
if value.Kind() != reflect.Ptr && value.Kind() != reflect.Interface {
return nil, errors.New("marshal called on non-pointer")
}
elem := value.Elem()
if elem.Kind() == reflect.Struct {
// iterate over all struct fields
for i := 0; i < elem.NumField(); i++ {
field := elem.Field(i)
			// skip fields that cannot be converted to an interface
			if !field.CanInterface() {
				continue
			}
			inter := field.Addr().Interface()
// convert field interface to runtime.Object
obj, ok := inter.(runtime.Object)
if !ok {
continue
}
if i > 0 {
// separate YAML documents
buf.Write([]byte("---\n"))
}
// serialize k8s resource
if err := serializer.Encode(obj, &buf); err != nil {
return nil, err
}
}
}
return buf.Bytes(), nil
}
// UnmarshalK8SResources takes YAML and converts it into a k8s resources struct.
func UnmarshalK8SResources(data []byte, into interface{}) error {
// reflect over struct containing fields that are k8s resources
	value := reflect.ValueOf(into)
	if value.Kind() != reflect.Ptr {
		return errors.New("UnmarshalK8SResources called on non-pointer")
	}
	value = value.Elem()
	if value.Kind() != reflect.Struct {
		return errors.New("can only reflect over pointer to struct")
	}
decoder := serializer.NewCodecFactory(scheme.Scheme).UniversalDecoder()
documents, err := splitYAML(data)
if err != nil {
return fmt.Errorf("unable to split deployment YAML into multiple documents: %w", err)
}
if len(documents) != value.NumField() {
return fmt.Errorf("expected %v YAML documents, got %v", value.NumField(), len(documents))
}
for i := 0; i < value.NumField(); i++ {
field := value.Field(i)
		// check if the field can be converted to an interface
		if !field.CanInterface() {
			return fmt.Errorf("cannot use struct field %v as interface", i)
		}
		inter := field.Addr().Interface()
// convert field interface to runtime.Object
obj, ok := inter.(runtime.Object)
if !ok {
return fmt.Errorf("cannot convert struct field %v as k8s runtime object", i)
}
// decode YAML document into struct field
if err := runtime.DecodeInto(decoder, documents[i], obj); err != nil {
return err
}
}
return nil
}
// splitYAML splits a YAML multidoc into a slice of multiple YAML docs.
func splitYAML(resources []byte) ([][]byte, error) {
dec := yaml.NewDecoder(bytes.NewReader(resources))
var res [][]byte
for {
var value interface{}
err := dec.Decode(&value)
if errors.Is(err, io.EOF) {
break
}
if err != nil {
return nil, err
}
valueBytes, err := yaml.Marshal(value)
if err != nil {
return nil, err
}
res = append(res, valueBytes)
}
return res, nil
}
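To make the reflection contract explicit — every exported field must be addressable and implement runtime.Object, one YAML document is emitted per field in declaration order, and UnmarshalK8SResources demands exactly that many documents back — here is a minimal round-trip sketch. The demoResources type is illustrative only and assumes imports k8s "k8s.io/api/core/v1" and metav1 "k8s.io/apimachinery/pkg/apis/meta/v1":

type demoResources struct {
	First  k8s.ConfigMap
	Second k8s.ConfigMap
}

func demoRoundTrip() error {
	// TypeMeta must be set; the serializer needs apiVersion and kind for each document.
	demo := demoResources{
		First:  k8s.ConfigMap{TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "ConfigMap"}},
		Second: k8s.ConfigMap{TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "ConfigMap"}},
	}
	data, err := MarshalK8SResources(&demo) // two documents, separated by "---"
	if err != nil {
		return err
	}
	var restored demoResources
	return UnmarshalK8SResources(data, &restored) // errors unless exactly two documents are present
}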

View file

@ -0,0 +1,33 @@
package k8sapi
import (
"context"
"fmt"
"github.com/coreos/go-systemd/v22/dbus"
)
// RestartSystemdUnit restarts a systemd unit and waits for the restart job to complete.
func RestartSystemdUnit(unit string) error {
	ctx := context.Background()
	conn, err := dbus.NewSystemdConnectionContext(ctx)
	if err != nil {
		return fmt.Errorf("establishing systemd connection failed: %w", err)
	}
	defer conn.Close()
restartChan := make(chan string)
if _, err := conn.RestartUnitContext(ctx, unit, "replace", restartChan); err != nil {
return fmt.Errorf("restarting systemd unit \"%v\" failed: %w", unit, err)
}
// Wait for the restart to finish and actually check if it was
// successful or not.
result := <-restartChan
switch result {
case "done":
return nil
default:
return fmt.Errorf("restarting systemd unit \"%v\" failed: expected %v but received %v", unit, "done", result)
}
}
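A short usage sketch (the unit name is illustrative). Besides "done", systemd may report results such as "canceled", "timeout", or "failed", all of which take the error path above:

if err := RestartSystemdUnit("containerd.service"); err != nil {
	return fmt.Errorf("restarting containerd: %w", err)
}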

View file

@ -0,0 +1,134 @@
package k8sapi
import (
"errors"
"fmt"
"os"
"os/exec"
"strings"
"github.com/edgelesssys/constellation/coordinator/kubernetes/k8sapi/resources"
kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
)
// kubeConfig is the path to the kubernetes admin config (used for authentication).
const kubeConfig = "/etc/kubernetes/admin.conf"
// Client provides the functionality of `kubectl apply`.
type Client interface {
Apply(resources resources.Marshaler, forceConflicts bool) error
SetKubeconfig(kubeconfig []byte)
// TODO: add tolerations
}
// ClusterUtil provides low-level management functions for the Kubernetes cluster on this node.
type ClusterUtil interface {
InitCluster(initConfig []byte) (*kubeadm.BootstrapTokenDiscovery, error)
JoinCluster(joinConfig []byte) error
SetupPodNetwork(kubectl Client, podNetworkConfiguration resources.Marshaler) error
SetupAutoscaling(kubectl Client, clusterAutoscalerConfiguration resources.Marshaler) error
SetupCloudControllerManager(kubectl Client, cloudControllerManagerConfiguration resources.Marshaler) error
RestartKubelet() error
}
// KubernetesUtil implements ClusterUtil by shelling out to kubeadm and kubectl.
type KubernetesUtil struct{}
// InitCluster initializes a new Kubernetes cluster via `kubeadm init` and returns
// the join parameters parsed from the command's output.
func (k *KubernetesUtil) InitCluster(initConfig []byte) (*kubeadm.BootstrapTokenDiscovery, error) {
	initConfigFile, err := os.CreateTemp("", "kubeadm-init.*.yaml")
	if err != nil {
		return nil, fmt.Errorf("failed to create kubeadm init config file: %w", err)
	}
	defer os.Remove(initConfigFile.Name())
	defer initConfigFile.Close()
if _, err := initConfigFile.Write(initConfig); err != nil {
return nil, fmt.Errorf("writing kubeadm init yaml config %v failed: %w", initConfigFile.Name(), err)
}
cmd := exec.Command("kubeadm", "init", "--config", initConfigFile.Name())
stdout, err := cmd.Output()
if err != nil {
var exitErr *exec.ExitError
if errors.As(err, &exitErr) {
return nil, fmt.Errorf("kubeadm init failed (code %v) with: %s", exitErr.ExitCode(), exitErr.Stderr)
}
return nil, fmt.Errorf("kubeadm init failed: %w", err)
}
stdoutStr := string(stdout)
indexKubeadmJoin := strings.Index(stdoutStr, "kubeadm join")
if indexKubeadmJoin < 0 {
return nil, errors.New("kubeadm init did not return join command")
}
	// `kubeadm init` prints two join commands, each broken across multiple lines with backslash + newline continuations.
	// The following code assumes that stdoutStr[indexKubeadmJoin:] looks like the following string.
	// -----------------------------------------------------------------------------------------------
	// --- When modifying the kubeadm.InitConfiguration make sure that this assumption still holds ---
	// -----------------------------------------------------------------------------------------------
	// "kubeadm join 127.0.0.1:16443 --token vlhjr4.9l6lhek0b9v65m67 \
	// --discovery-token-ca-cert-hash sha256:2b5343a162e31b70602e3cab3d87189dc10431e869633c4db63c3bfcd038dee6 \
	// --control-plane
	//
	// Then you can join any number of worker nodes by running the following on each as root:
	//
	// kubeadm join 127.0.0.1:16443 --token vlhjr4.9l6lhek0b9v65m67 \
	// --discovery-token-ca-cert-hash sha256:2b5343a162e31b70602e3cab3d87189dc10431e869633c4db63c3bfcd038dee6"
	//
	// Replacing each backslash + newline with a space collapses the control-plane join command onto the first line.
	joinCommand := strings.ReplaceAll(stdoutStr[indexKubeadmJoin:], "\\\n", " ")
	// Split off that first line; it now contains the complete control-plane join command.
	splitJoinCommand := strings.SplitN(joinCommand, "\n", 2)
	return ParseJoinCommand(splitJoinCommand[0])
}
// SetupPodNetwork sets up the flannel pod network.
func (k *KubernetesUtil) SetupPodNetwork(kubectl Client, podNetworkConfiguration resources.Marshaler) error {
if err := kubectl.Apply(podNetworkConfiguration, true); err != nil {
return err
}
// allow coredns to run on uninitialized nodes (required by cloud-controller-manager)
err := exec.Command("kubectl", "--kubeconfig", kubeConfig, "-n", "kube-system", "patch", "deployment", "coredns", "--type", "json", "-p", "[{\"op\":\"add\",\"path\":\"/spec/template/spec/tolerations/-\",\"value\":{\"key\":\"node.cloudprovider.kubernetes.io/uninitialized\",\"value\":\"true\",\"effect\":\"NoSchedule\"}}]").Run()
if err != nil {
return err
}
return exec.Command("kubectl", "--kubeconfig", kubeConfig, "-n", "kube-system", "patch", "deployment", "coredns", "--type", "json", "-p", "[{\"op\":\"add\",\"path\":\"/spec/template/spec/tolerations/-\",\"value\":{\"key\":\"node.kubernetes.io/network-unavailable\",\"value\":\"\",\"effect\":\"NoSchedule\"}}]").Run()
}
// SetupAutoscaling deploys the given cluster-autoscaler resources.
func (k *KubernetesUtil) SetupAutoscaling(kubectl Client, clusterAutoscalerConfiguration resources.Marshaler) error {
	return kubectl.Apply(clusterAutoscalerConfiguration, true)
}
// SetupCloudControllerManager deploys the given cloud-controller-manager resources.
func (k *KubernetesUtil) SetupCloudControllerManager(kubectl Client, cloudControllerManagerConfiguration resources.Marshaler) error {
	return kubectl.Apply(cloudControllerManagerConfiguration, true)
}
// JoinCluster joins an existing Kubernetes cluster using `kubeadm join`.
func (k *KubernetesUtil) JoinCluster(joinConfig []byte) error {
	joinConfigFile, err := os.CreateTemp("", "kubeadm-join.*.yaml")
	if err != nil {
		return fmt.Errorf("failed to create kubeadm join config file: %w", err)
	}
	defer os.Remove(joinConfigFile.Name())
	defer joinConfigFile.Close()
	if _, err := joinConfigFile.Write(joinConfig); err != nil {
		return fmt.Errorf("writing kubeadm join yaml config %v failed: %w", joinConfigFile.Name(), err)
	}
// run `kubeadm join` to join a worker node to an existing kubernetes cluster
cmd := exec.Command("kubeadm", "join", "--config", joinConfigFile.Name())
if _, err := cmd.Output(); err != nil {
var exitErr *exec.ExitError
if errors.As(err, &exitErr) {
return fmt.Errorf("kubeadm join failed (code %v) with: %s", exitErr.ExitCode(), exitErr.Stderr)
}
return fmt.Errorf("kubeadm join failed: %w", err)
}
return nil
}
// RestartKubelet restarts a kubelet.
func (k *KubernetesUtil) RestartKubelet() error {
return RestartSystemdUnit("kubelet.service")
}
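Putting the pieces together, a control-plane bootstrap flow built from these helpers might look like the following sketch (the kubeadm config payload and pod network resources are placeholders; the wiring is illustrative, not lifted from the coordinator):

func bootstrapSketch(initConfig []byte, kubectl Client, podNetwork resources.Marshaler) error {
	util := &KubernetesUtil{}
	// Initialize the control plane and capture the parsed join parameters.
	joinParams, err := util.InitCluster(initConfig)
	if err != nil {
		return err
	}
	// Deploy flannel and patch the coredns tolerations.
	if err := util.SetupPodNetwork(kubectl, podNetwork); err != nil {
		return err
	}
	// joinParams carries the API server endpoint, token, and CA cert hashes that
	// other nodes need to build a kubeadm join config for util.JoinCluster.
	_ = joinParams
	return nil
}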