bootstrapper: remove cilium restart fix
Tests concluded that restarting the Cilium agent after the first boot is no longer needed for pods to regain connectivity.
parent 1972b635b4
commit 79f562374a

9 changed files with 4 additions and 207 deletions
@@ -7,7 +7,6 @@ go_library(
         "action.go",
         "actionfactory.go",
         "chartutil.go",
-        "ciliumhelper.go",
         "helm.go",
         "loader.go",
         "overrides.go",
@@ -474,9 +473,6 @@ go_library(
         "//internal/semver",
         "//internal/versions",
         "@com_github_pkg_errors//:errors",
-        "@io_k8s_apimachinery//pkg/apis/meta/v1:meta",
-        "@io_k8s_client_go//kubernetes",
-        "@io_k8s_client_go//tools/clientcmd",
         "@io_k8s_client_go//util/retry",
         "@sh_helm_helm_v3//pkg/ignore",
         "@sh_helm_helm_v3//pkg/action",
@@ -11,10 +11,8 @@ import (
 	"errors"
 	"fmt"
 	"strings"
-	"time"
 
 	"github.com/edgelesssys/constellation/v2/internal/compatibility"
-	"github.com/edgelesssys/constellation/v2/internal/constants"
 	"github.com/edgelesssys/constellation/v2/internal/semver"
 	"helm.sh/helm/v3/pkg/action"
 	"helm.sh/helm/v3/pkg/chart"
@@ -133,36 +131,9 @@ func (a actionFactory) appendNewAction(release Release, configTargetVersion semv
 func (a actionFactory) newInstall(release Release) *installAction {
 	action := &installAction{helmAction: newHelmInstallAction(a.cfg, release), release: release, log: a.log}
-	if action.ReleaseName() == ciliumInfo.releaseName {
-		action.postInstall = func(ctx context.Context) error {
-			return ciliumPostInstall(ctx, a.log)
-		}
-	}
 	return action
 }
-
-func ciliumPostInstall(ctx context.Context, log debugLog) error {
-	log.Debugf("Waiting for Cilium to become ready")
-	helper, err := newK8sCiliumHelper(constants.AdminConfFilename)
-	if err != nil {
-		return fmt.Errorf("creating Kubernetes client: %w", err)
-	}
-	timeToStartWaiting := time.Now()
-	// TODO(3u13r): Reduce the timeout when we switched the package repository - this is only this high because we once
-	// saw polling times of ~16 minutes when hitting a slow PoP from Fastly (GitHub's / ghcr.io CDN).
-	if err := helper.WaitForDS(ctx, "kube-system", "cilium", log); err != nil {
-		return fmt.Errorf("waiting for Cilium to become healthy: %w", err)
-	}
-	timeUntilFinishedWaiting := time.Since(timeToStartWaiting)
-	log.Debugf("Cilium became healthy after %s", timeUntilFinishedWaiting.String())
-
-	log.Debugf("Fix Cilium through restart")
-	if err := helper.RestartDS("kube-system", "cilium"); err != nil {
-		return fmt.Errorf("restarting Cilium: %w", err)
-	}
-	return nil
-}
 
 func (a actionFactory) newUpgrade(release Release) *upgradeAction {
 	action := &upgradeAction{helmAction: newHelmUpgradeAction(a.cfg), release: release, log: a.log}
 	if release.ReleaseName == constellationOperatorsInfo.releaseName {
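The post-install hook removed above gated on WaitForDS, the polling helper deleted in the next hunk, which rechecks the DaemonSet status every 10 seconds inside a hand-rolled select loop. For comparison, a minimal sketch of the same readiness check built on apimachinery's wait package; the helper name waitForDaemonSet and the choice of wait.PollUntilContextCancel (available in apimachinery v0.27+) are this sketch's assumptions, not code from the commit:

package helm

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForDaemonSet blocks until every scheduled pod of the named DaemonSet
// reports ready, rechecking every 10 seconds until ctx is cancelled.
func waitForDaemonSet(ctx context.Context, c kubernetes.Interface, namespace, name string) error {
	return wait.PollUntilContextCancel(ctx, 10*time.Second, true, func(ctx context.Context) (bool, error) {
		ds, err := c.AppsV1().DaemonSets(namespace).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return false, err // a hard API error ends the wait early
		}
		return ds.Status.NumberReady == ds.Status.DesiredNumberScheduled, nil
	})
}

Folding the loop into PollUntilContextCancel keeps the context check and the sleep in one place instead of interleaving them in a select, which is how the deleted helper did it.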
@@ -1,72 +0,0 @@
-/*
-Copyright (c) Edgeless Systems GmbH
-
-SPDX-License-Identifier: AGPL-3.0-only
-*/
-
-package helm
-
-import (
-	"context"
-	"fmt"
-	"time"
-
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/client-go/kubernetes"
-	"k8s.io/client-go/tools/clientcmd"
-)
-
-type k8sDsClient struct {
-	clientset *kubernetes.Clientset
-}
-
-func newK8sCiliumHelper(kubeconfigPath string) (*k8sDsClient, error) {
-	config, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath)
-	if err != nil {
-		return nil, err
-	}
-	clientset, err := kubernetes.NewForConfig(config)
-	if err != nil {
-		return nil, err
-	}
-	return &k8sDsClient{clientset: clientset}, nil
-}
-
-// WaitForDS waits for a DaemonSet to become ready.
-func (h *k8sDsClient) WaitForDS(ctx context.Context, namespace, name string, log debugLog) error {
-	for {
-		select {
-		case <-ctx.Done():
-			return fmt.Errorf("context expired before DaemonSet %q became ready", name)
-		default:
-			ds, err := h.clientset.AppsV1().DaemonSets(namespace).Get(ctx, name, v1.GetOptions{})
-			if err != nil {
-				return err
-			}
-
-			if ds.Status.NumberReady == ds.Status.DesiredNumberScheduled {
-				log.Debugf("DaemonSet %s is ready\n", name)
-				return nil
-			}
-
-			log.Debugf("Waiting for DaemonSet %s to become ready...\n", name)
-			time.Sleep(10 * time.Second)
-		}
-	}
-}
-
-// RestartDS restarts all pods of a DaemonSet by updating its template.
-func (h *k8sDsClient) RestartDS(namespace, name string) error {
-	ds, err := h.clientset.AppsV1().DaemonSets(namespace).Get(context.Background(), name, v1.GetOptions{})
-	if err != nil {
-		return err
-	}
-
-	ds.Spec.Template.ObjectMeta.Annotations["restartTimestamp"] = fmt.Sprintf("%d", time.Now().Unix())
-	_, err = h.clientset.AppsV1().DaemonSets(namespace).Update(context.Background(), ds, v1.UpdateOptions{})
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
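The deleted RestartDS forces a rolling restart the same way kubectl rollout restart does: it mutates the pod template, so the DaemonSet controller sees a new template and replaces every pod. A hedged sketch of that pattern with two deviations from the deleted code, both assumptions of this sketch rather than anything in the commit: it uses kubectl's kubectl.kubernetes.io/restartedAt annotation key instead of the custom restartTimestamp, and it guards against a nil annotation map (writing to a nil map panics; the original relied on Cilium's template already carrying annotations):

package helm

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// restartDaemonSet bumps a pod-template annotation; the changed template
// makes the DaemonSet controller replace every pod, i.e. a rolling restart.
func restartDaemonSet(ctx context.Context, c kubernetes.Interface, namespace, name string) error {
	ds, err := c.AppsV1().DaemonSets(namespace).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("getting DaemonSet %s/%s: %w", namespace, name, err)
	}
	if ds.Spec.Template.Annotations == nil {
		ds.Spec.Template.Annotations = map[string]string{} // writing to a nil map would panic
	}
	ds.Spec.Template.Annotations["kubectl.kubernetes.io/restartedAt"] = time.Now().Format(time.RFC3339)
	if _, err := c.AppsV1().DaemonSets(namespace).Update(ctx, ds, metav1.UpdateOptions{}); err != nil {
		return fmt.Errorf("updating DaemonSet %s/%s: %w", namespace, name, err)
	}
	return nil
}

One caveat: a concurrent writer can make the Update fail with a conflict. The deleted code ignored that case; wrapping the call in client-go's retry.RetryOnConflict (note that "@io_k8s_client_go//util/retry" stays in the BUILD deps above) would be the usual remedy.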