bootstrapper: remove cilium restart fix

Tests concluded that restarting the Cilium agent after the
first boot is no longer needed for pods to regain
connectivity.
Leonard Cohnen 2023-11-03 20:45:32 +01:00 committed by 3u13r
parent 1972b635b4
commit 79f562374a
9 changed files with 4 additions and 207 deletions


@@ -14,7 +14,6 @@ import (
     "errors"
     "fmt"
     "net"
-    "net/http"
     "os"
     "os/exec"
     "path/filepath"
@@ -250,71 +249,6 @@ type SetupPodNetworkInput struct {
     LoadBalancerPort string
 }
-// WaitForCilium waits until Cilium reports a healthy status over its /healthz endpoint.
-func (k *KubernetesUtil) WaitForCilium(ctx context.Context, log *logger.Logger) error {
-    // wait for cilium pod to be healthy
-    client := http.Client{}
-    for {
-        select {
-        case <-ctx.Done():
-            return ctx.Err()
-        default:
-            time.Sleep(5 * time.Second)
-            req, err := http.NewRequestWithContext(ctx, http.MethodGet, "http://127.0.0.1:9879/healthz", http.NoBody)
-            if err != nil {
-                return fmt.Errorf("unable to create request: %w", err)
-            }
-            resp, err := client.Do(req)
-            if err != nil {
-                log.With(zap.Error(err)).Infof("Waiting for local Cilium DaemonSet - Pod not healthy yet")
-                continue
-            }
-            resp.Body.Close()
-            if resp.StatusCode == 200 {
-                return nil
-            }
-        }
-    }
-}
-// FixCilium fixes https://github.com/cilium/cilium/issues/19958
-// Instead of a rollout restart of the Cilium DaemonSet, it only restarts the local Cilium Pod.
-func (k *KubernetesUtil) FixCilium(ctx context.Context) error {
-    // get cilium container id
-    out, err := exec.CommandContext(ctx, "/run/state/bin/crictl", "ps", "--name", "cilium-agent", "-q").CombinedOutput()
-    if err != nil {
-        return fmt.Errorf("getting cilium container id failed: %s", out)
-    }
-    outLines := strings.Split(string(out), "\n")
-    if len(outLines) < 2 {
-        return fmt.Errorf("getting cilium container id returned invalid output: %s", out)
-    }
-    containerID := outLines[len(outLines)-2]
-    // get cilium pod id
-    out, err = exec.CommandContext(ctx, "/run/state/bin/crictl", "inspect", "-o", "go-template", "--template", "{{ .info.sandboxID }}", containerID).CombinedOutput()
-    if err != nil {
-        return fmt.Errorf("getting Cilium Pod ID failed: %s", out)
-    }
-    outLines = strings.Split(string(out), "\n")
-    if len(outLines) < 2 {
-        return fmt.Errorf("getting Cilium Pod ID returned invalid output: %s", out)
-    }
-    podID := outLines[len(outLines)-2]
-    // stop and delete pod
-    out, err = exec.CommandContext(ctx, "/run/state/bin/crictl", "stopp", podID).CombinedOutput()
-    if err != nil {
-        return fmt.Errorf("stopping Cilium agent Pod failed: %s", out)
-    }
-    out, err = exec.CommandContext(ctx, "/run/state/bin/crictl", "rmp", podID).CombinedOutput()
-    if err != nil {
-        return fmt.Errorf("removing Cilium agent Pod failed: %s", out)
-    }
-    return nil
-}
 // JoinCluster joins existing Kubernetes cluster using kubeadm join.
 func (k *KubernetesUtil) JoinCluster(ctx context.Context, joinConfig []byte, peerRole role.Role, controlPlaneHost, controlPlanePort string, log *logger.Logger) error {
     // TODO(3u13r): audit policy should be user input
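
For reference, the deleted WaitForCilium boils down to a plain
healthz-polling loop. The standalone sketch below shows the same
pattern; the endpoint and 5-second interval come from the removed
code, while the ticker (instead of the original time.Sleep) and the
2-minute deadline are illustrative assumptions. The removed call
sites passed context.Background(), i.e. the wait had no deadline.

    package main

    import (
        "context"
        "fmt"
        "net/http"
        "time"
    )

    // waitForHealthz polls a local healthz endpoint until it returns
    // 200 OK or the context expires - the same loop the removed
    // WaitForCilium ran against Cilium's agent endpoint.
    func waitForHealthz(ctx context.Context, url string) error {
        client := http.Client{}
        ticker := time.NewTicker(5 * time.Second) // interval from the removed code
        defer ticker.Stop()
        for {
            select {
            case <-ctx.Done():
                return ctx.Err()
            case <-ticker.C:
                req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, http.NoBody)
                if err != nil {
                    return fmt.Errorf("creating request: %w", err)
                }
                resp, err := client.Do(req)
                if err != nil {
                    continue // agent not reachable yet; retry on the next tick
                }
                resp.Body.Close()
                if resp.StatusCode == http.StatusOK {
                    return nil
                }
            }
        }
    }

    func main() {
        // Bounded wait (assumed here); the removed code had no deadline.
        ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
        defer cancel()
        if err := waitForHealthz(ctx, "http://127.0.0.1:9879/healthz"); err != nil {
            fmt.Println("Cilium never became healthy:", err)
        }
    }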
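
The deleted FixCilium worked around cilium/cilium#19958 by stopping
and removing the local agent's pod sandbox via crictl (stopp/rmp),
letting the kubelet recreate it. The easy-to-misread part is the
outLines[len(outLines)-2] indexing: crictl terminates its output with
a newline, so the split yields a trailing empty string and the ID is
the second-to-last element. A minimal sketch of just that parsing
step (helper name hypothetical):

    package main

    import (
        "fmt"
        "strings"
    )

    // lastNonEmptyLine extracts the final line of newline-terminated
    // command output, mirroring the removed FixCilium's indexing: the
    // trailing "\n" makes Split produce an empty last element.
    func lastNonEmptyLine(out []byte) (string, error) {
        lines := strings.Split(string(out), "\n")
        if len(lines) < 2 {
            return "", fmt.Errorf("invalid output: %q", out)
        }
        return lines[len(lines)-2], nil
    }

    func main() {
        // crictl ps -q prints one container ID followed by a newline.
        id, err := lastNonEmptyLine([]byte("1a2b3c4d5e6f\n"))
        fmt.Println(id, err) // 1a2b3c4d5e6f <nil>
    }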


@@ -19,7 +19,5 @@ type clusterUtil interface {
     InstallComponents(ctx context.Context, kubernetesComponents components.Components) error
     InitCluster(ctx context.Context, initConfig []byte, nodeName, clusterName string, ips []net.IP, controlPlaneHost, controlPlanePort string, conformanceMode bool, log *logger.Logger) ([]byte, error)
     JoinCluster(ctx context.Context, joinConfig []byte, peerRole role.Role, controlPlaneHost, controlPlanePort string, log *logger.Logger) error
-    WaitForCilium(ctx context.Context, log *logger.Logger) error
-    FixCilium(ctx context.Context) error
     StartKubelet() error
 }
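
Dropping methods from clusterUtil also narrows what every
implementation and test stub must provide. One cheap guard when
shrinking an interface like this is a compile-time assertion; a
generic sketch of the pattern (all names illustrative, not the
project's - in the real package it would presumably name
KubernetesUtil):

    package main

    // Illustrative stand-ins for clusterUtil and its implementation.
    type starter interface {
        StartKubelet() error
    }

    type fakeUtil struct{}

    func (fakeUtil) StartKubelet() error { return nil }

    // Compile-time assertion: the build fails if fakeUtil ever stops
    // satisfying the interface, e.g. after a refactor like this commit.
    var _ starter = fakeUtil{}

    func main() {}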


@@ -242,17 +242,6 @@ func (k *KubeWrapper) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTo
         return fmt.Errorf("joining cluster: %v; %w ", string(joinConfigYAML), err)
     }
-    log.Infof("Waiting for Cilium to become healthy")
-    if err := k.clusterUtil.WaitForCilium(context.Background(), log); err != nil {
-        return fmt.Errorf("waiting for Cilium to become healthy: %w", err)
-    }
-    log.Infof("Restarting Cilium")
-    if err := k.clusterUtil.FixCilium(context.Background()); err != nil {
-        log.With(zap.Error(err)).Errorf("FixCilium failed")
-        // Continue and don't throw an error here - things might be okay.
-    }
     return nil
 }
@@ -307,22 +296,11 @@ func k8sCompliantHostname(in string) (string, error) {
 }
 // StartKubelet starts the kubelet service.
-func (k *KubeWrapper) StartKubelet(log *logger.Logger) error {
+func (k *KubeWrapper) StartKubelet() error {
     if err := k.clusterUtil.StartKubelet(); err != nil {
         return fmt.Errorf("starting kubelet: %w", err)
     }
-    log.Infof("Waiting for Cilium to become healthy")
-    if err := k.clusterUtil.WaitForCilium(context.Background(), log); err != nil {
-        return fmt.Errorf("waiting for Cilium to become healthy: %w", err)
-    }
-    log.Infof("Restarting Cilium")
-    if err := k.clusterUtil.FixCilium(context.Background()); err != nil {
-        log.With(zap.Error(err)).Errorf("FixCilium failed")
-        // Continue and don't throw an error here - things might be okay.
-    }
     return nil
 }
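
After the change, StartKubelet is a thin error-wrapping shim, which is
also why the logger parameter could go. A self-contained sketch of the
new shape with a stub, loosely following the test file below (method
names come from the diff; everything else is illustrative):

    package main

    import (
        "errors"
        "fmt"
    )

    type clusterUtil interface {
        StartKubelet() error
    }

    // stubClusterUtil mimics the stub pattern used in the project's tests.
    type stubClusterUtil struct{ startKubeletErr error }

    func (s stubClusterUtil) StartKubelet() error { return s.startKubeletErr }

    type KubeWrapper struct{ clusterUtil clusterUtil }

    // StartKubelet now only wraps the underlying error; no Cilium wait/restart.
    func (k *KubeWrapper) StartKubelet() error {
        if err := k.clusterUtil.StartKubelet(); err != nil {
            return fmt.Errorf("starting kubelet: %w", err)
        }
        return nil
    }

    func main() {
        k := &KubeWrapper{clusterUtil: stubClusterUtil{startKubeletErr: errors.New("unit not found")}}
        fmt.Println(k.StartKubelet()) // starting kubelet: unit not found
    }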


@@ -479,14 +479,6 @@ func (s *stubClusterUtil) StartKubelet() error {
     return s.startKubeletErr
 }
-func (s *stubClusterUtil) WaitForCilium(_ context.Context, _ *logger.Logger) error {
-    return nil
-}
-func (s *stubClusterUtil) FixCilium(_ context.Context) error {
-    return nil
-}
 type stubConfigProvider struct {
     initConfig k8sapi.KubeadmInitYAML
     joinConfig k8sapi.KubeadmJoinYAML