Mirror of https://github.com/edgelesssys/constellation.git, synced 2024-10-01 01:36:09 -04:00

ci: add upgrade to provider example test (#2775)

Commit 60a0a6020e (parent 2fea43a320)
.github/workflows/e2e-test-provider-example.yml (175 lines changed)
@@ -23,6 +23,14 @@ on:
       providerVersion:
         description: "Constellation Terraform provider version to use (with v prefix). Empty value means build from source."
         type: string
+      toImage:
+        description: Image (shortpath) the cluster is upgraded to, or empty for main/nightly.
+        type: string
+        required: false
+      toKubernetes:
+        description: Kubernetes version to target for the upgrade, empty for no upgrade.
+        type: string
+        required: false
   workflow_call:
     inputs:
       ref:
@@ -41,6 +49,14 @@ on:
       providerVersion:
         description: "Constellation Terraform provider version to use (with v prefix). Empty value means build from source."
         type: string
+      toImage:
+        description: Image (shortpath) the cluster is upgraded to, or empty for main/nightly.
+        type: string
+        required: false
+      toKubernetes:
+        description: Kubernetes version to target for the upgrade, empty for target's default version.
+        type: string
+        required: false
 
 jobs:
   provider-example-test:
@@ -94,6 +110,16 @@ jobs:
           username: ${{ github.actor }}
           password: ${{ secrets.GITHUB_TOKEN }}
 
+      - name: Download CLI # needed to determine K8s version for release versions
+        if: inputs.providerVersion != ''
+        shell: bash
+        run: |
+          curl -fsSL -o constellation https://github.com/edgelesssys/constellation/releases/download/${{ inputs.providerVersion }}/constellation-linux-amd64
+          chmod u+x constellation
+          ./constellation version
+          mkdir -p ${{ github.workspace }}/release
+          cp ./constellation ${{ github.workspace }}/release
+
       - name: Setup bazel
         uses: ./.github/actions/setup_bazel_nix
         with:
@@ -186,26 +212,38 @@ jobs:
           fi
 
           # take the middle (2nd) supported Kubernetes version (default)
-          kubernetes_version="$(../build/constellation config kubernetes-versions | awk 'NR==3{print $1}')"
+          if [[ "${{ inputs.providerVersion }}" != "" ]]; then
+            kubernetes_version="$(../release/constellation config kubernetes-versions | awk 'NR==3{print $1}')"
+          else
+            kubernetes_version="$(../build/constellation config kubernetes-versions | awk 'NR==3{print $1}')"
+          fi
 
           cat > _override.tf <<EOF
           terraform {
            required_providers {
              constellation = {
                source = "edgelesssys/constellation"
                version = "${version}"
              }
            }
          }
 
           locals {
-            name = "${{ steps.create-prefix.outputs.prefix }}"
-            version = "${image_version}"
-            microservice_version= "${prefixed_version}"
+            control_plane_count = 1
+            worker_count = 1
+          }
+
+          locals {
+            name = "${{ steps.create-prefix.outputs.prefix }}"
+            image_version = "${image_version}"
+            microservice_version = "${prefixed_version}"
             kubernetes_version = "${kubernetes_version}"
           }
 
           module "${{ inputs.cloudProvider }}_iam" {
            source = "${iam_src}"
          }
 
           module "${{ inputs.cloudProvider }}_infrastructure" {
            source = "${infra_src}"
          }
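Editor's note: the heredoc above is unquoted, so ${version}, ${image_version}, ${prefixed_version}, and ${kubernetes_version} are expanded by the shell when the file is written, while the ${{ ... }} expressions are substituted by GitHub Actions before the script even runs. Purely as an illustration, with every concrete value invented, the generated base _override.tf could come out roughly like this (assuming cloudProvider is aws):

terraform {
  required_providers {
    constellation = {
      source  = "edgelesssys/constellation"
      version = "2.14.0"            # hypothetical, from the shell variable ${version}
    }
  }
}

locals {
  control_plane_count = 1
  worker_count        = 1
}

locals {
  name                 = "e2e-provider-1234"  # hypothetical run prefix
  image_version        = "v2.14.0"            # hypothetical
  microservice_version = "v2.14.0"            # hypothetical
  kubernetes_version   = "v1.28.4"            # hypothetical
}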
@@ -259,11 +297,134 @@ jobs:
             terraform apply -target module.azure_iam -auto-approve
             terraform apply -target module.azure_infrastructure -auto-approve
             ../build/constellation maa-patch "$(terraform output -raw maa_url)"
-            TF_LOG=INFO terraform apply -target constellation_cluster.azure_example -auto-approve
+            terraform apply -target constellation_cluster.azure_example -auto-approve
           else
-            TF_LOG=INFO terraform apply -auto-approve
+            terraform apply -auto-approve
           fi
 
+      - name: Cleanup Terraform Cluster on failure
+        # cleanup here already on failure, because the subsequent TF overrides might make the TF config invalid and thus the destroy would fail later
+        # outcome is part of the steps context (https://docs.github.com/en/actions/learn-github-actions/contexts#steps-context)
+        if: failure() && steps.apply_terraform.outcome != 'skipped'
+        working-directory: ${{ github.workspace }}/cluster
+        shell: bash
+        run: |
+          terraform init
+          terraform destroy -auto-approve
+
+      - name: Add Provider to local Terraform registry # needed if release version was used before
+        if: inputs.providerVersion != ''
+        working-directory: ${{ github.workspace }}/build
+        shell: bash
+        run: |
+          bazel run //:devbuild --cli_edition=enterprise
+
+      - name: Update cluster configuration # for duplicate variable declaration, the last one is used
+        working-directory: ${{ github.workspace }}/cluster
+        shell: bash
+        run: |
+          cat >> _override.tf <<EOF
+          locals {
+            image_version = "${{ inputs.toImage || steps.find-latest-image.outputs.image }}"
+          }
+          EOF
+
+          if [[ "${{ inputs.toKubernetes }}" != "" ]]; then
+            cat >> _override.tf <<EOF
+          resource "constellation_cluster" "${{ inputs.cloudProvider }}_example" {
+            kubernetes_version = "${{ inputs.toKubernetes }}"
+          }
+          EOF
+          fi
+
+          prefixed_version=${{ steps.build.outputs.build_version }}
+          version=${prefixed_version#v} # remove v prefix
+
+          # needs to be explicitly set to upgrade
+          cat >> _override.tf <<EOF
+          resource "constellation_cluster" "${{ inputs.cloudProvider }}_example" {
+            constellation_microservice_version = "${prefixed_version}"
+          }
+          EOF
+
+          cat >> _override.tf <<EOF
+          terraform {
+            required_providers {
+              constellation = {
+                source = "edgelesssys/constellation"
+                version = "${version}"
+              }
+            }
+          }
+          EOF
+          cat _override.tf
+
+      - name: Upgrade Terraform Cluster
+        working-directory: ${{ github.workspace }}/cluster
+        shell: bash
+        run: |
+          terraform init --upgrade
+          terraform apply -auto-approve
+
+      - name: Assert upgrade successful
+        working-directory: ${{ github.workspace }}/cluster
+        env:
+          IMAGE: ${{ inputs.toImage && inputs.toImage || steps.find-latest-image.outputs.image }}
+          KUBERNETES: ${{ inputs.toKubernetes }}
+          MICROSERVICES: ${{ steps.build.outputs.build_version }}
+          WORKERNODES: 1
+          CONTROLNODES: 1
+        run: |
+          terraform output -raw kubeconfig > constellation-admin.conf
+
+          if [[ -n "${MICROSERVICES}" ]]; then
+            MICROSERVICES_FLAG="--target-microservices=${MICROSERVICES}"
+          fi
+          if [[ -n "${KUBERNETES}" ]]; then
+            KUBERNETES_FLAG="--target-kubernetes=${KUBERNETES}"
+          fi
+          if [[ -n "${IMAGE}" ]]; then
+            IMAGE_FLAG="--target-image=${IMAGE}"
+          fi
+
+          # cfg must be in same dir as KUBECONFIG
+          ../build/constellation config generate "${{ inputs.cloudProvider }}"
+          # make cfg valid with fake data
+          # IMPORTANT: zone needs to be correct because it is used to resolve the CSP image ref
+          if [[ "${{ inputs.cloudProvider }}" == "azure" ]]; then
+            location="${{ inputs.regionZone || 'northeurope' }}"
+            yq e ".provider.azure.location = \"${location}\"" -i constellation-conf.yaml
+
+            yq e '.provider.azure.subscription = "123e4567-e89b-12d3-a456-426614174000"' -i constellation-conf.yaml
+            yq e '.provider.azure.tenant = "123e4567-e89b-12d3-a456-426614174001"' -i constellation-conf.yaml
+            yq e '.provider.azure.resourceGroup = "myResourceGroup"' -i constellation-conf.yaml
+            yq e '.provider.azure.userAssignedIdentity = "myIdentity"' -i constellation-conf.yaml
+          fi
+          if [[ "${{ inputs.cloudProvider }}" == "gcp" ]]; then
+            zone="${{ inputs.regionZone || 'europe-west3-b' }}"
+            region=$(echo "${zone}" | rev | cut -c 2- | rev)
+            yq e ".provider.gcp.region = \"${region}\"" -i constellation-conf.yaml
+            yq e ".provider.gcp.zone = \"${zone}\"" -i constellation-conf.yaml
+
+            yq e '.provider.gcp.project = "demo-gcp-project"' -i constellation-conf.yaml
+            yq e '.nodeGroups.control_plane_default.zone = "europe-west3-b"' -i constellation-conf.yaml
+            # Set the zone for worker_default node group to a fictional value
+            yq e '.nodeGroups.worker_default.zone = "europe-west3-b"' -i constellation-conf.yaml
+            yq e '.provider.gcp.serviceAccountKeyPath = "/path/to/your/service-account-key.json"' -i constellation-conf.yaml
+          fi
+          if [[ "${{ inputs.cloudProvider }}" == "aws" ]]; then
+            zone=${{ inputs.regionZone || 'us-east-2c' }}
+            region=$(echo "${zone}" | rev | cut -c 2- | rev)
+            yq e ".provider.aws.region = \"${region}\"" -i constellation-conf.yaml
+            yq e ".provider.aws.zone = \"${zone}\"" -i constellation-conf.yaml
+
+            yq e '.provider.aws.iamProfileControlPlane = "demoControlPlaneIAMProfile"' -i constellation-conf.yaml
+            yq e '.provider.aws.iamProfileWorkerNodes = "demoWorkerNodesIAMProfile"' -i constellation-conf.yaml
+            yq e '.nodeGroups.control_plane_default.zone = "eu-central-1a"' -i constellation-conf.yaml
+            yq e '.nodeGroups.worker_default.zone = "eu-central-1a"' -i constellation-conf.yaml
+          fi
+          KUBECONFIG=${{ github.workspace }}/cluster/constellation-admin.conf bazel run //e2e/provider-upgrade:provider-upgrade_test -- --want-worker "$WORKERNODES" --want-control "$CONTROLNODES" --cli "${{ github.workspace }}/build/constellation" "$IMAGE_FLAG" "$KUBERNETES_FLAG" "$MICROSERVICES_FLAG"
+
       - name: Destroy Terraform Cluster
         # outcome is part of the steps context (https://docs.github.com/en/actions/learn-github-actions/contexts#steps-context)
         if: always() && steps.apply_terraform.outcome != 'skipped'
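Editor's note: to make the upgrade flow easier to follow, the "Update cluster configuration" step appends further blocks to the same _override.tf, and, per that step's own comment, the declaration that comes last is the one that takes effect. A purely hypothetical rendering of the appended part (all versions invented for illustration, cloudProvider assumed to be aws) might look like:

locals {
  image_version = "v2.15.0"                        # hypothetical target image
}

resource "constellation_cluster" "aws_example" {
  kubernetes_version = "v1.29.1"                   # only appended when toKubernetes is set (hypothetical)
}

resource "constellation_cluster" "aws_example" {
  constellation_microservice_version = "v2.15.0"   # hypothetical build version
}

terraform {
  required_providers {
    constellation = {
      source  = "edgelesssys/constellation"
      version = "2.15.0"                           # hypothetical locally built provider version
    }
  }
}

The "Upgrade Terraform Cluster" step then re-runs terraform init --upgrade and terraform apply, and the provider-upgrade Go test afterwards asserts that nodes and microservices eventually report the target versions.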
e2e/internal/upgrade/BUILD.bazel

@@ -10,9 +10,19 @@ go_library(
     importpath = "github.com/edgelesssys/constellation/v2/e2e/internal/upgrade",
     visibility = ["//e2e:__subpackages__"],
     deps = [
+        "//internal/api/attestationconfigapi",
+        "//internal/config",
         "//internal/constants",
+        "//internal/file",
+        "//internal/imagefetcher",
         "//internal/logger",
         "//internal/semver",
+        "//internal/versions",
+        "@com_github_spf13_afero//:afero",
+        "@com_github_stretchr_testify//require",
+        "@io_bazel_rules_go//go/runfiles:go_default_library",
+        "@io_k8s_apimachinery//pkg/apis/meta/v1:meta",
+        "@io_k8s_client_go//kubernetes",
         "@sh_helm_helm_v3//pkg/action",
         "@sh_helm_helm_v3//pkg/cli",
     ],
@@ -35,16 +45,9 @@ go_test(
     tags = ["manual"],
     deps = [
         "//e2e/internal/kubectl",
-        "//internal/api/attestationconfigapi",
-        "//internal/config",
         "//internal/constants",
-        "//internal/file",
-        "//internal/imagefetcher",
-        "//internal/semver",
         "//internal/versions",
-        "@com_github_spf13_afero//:afero",
         "@com_github_stretchr_testify//require",
-        "@io_bazel_rules_go//go/runfiles:go_default_library",
         "@io_k8s_api//core/v1:core",
         "@io_k8s_apimachinery//pkg/apis/meta/v1:meta",
         "@io_k8s_client_go//kubernetes",
e2e/internal/upgrade/upgrade.go

@@ -1,3 +1,5 @@
+//go:build e2e
+
 /*
 Copyright (c) Edgeless Systems GmbH
 
@@ -17,3 +19,313 @@ SPDX-License-Identifier: AGPL-3.0-only
 //
 // - set or fetch measurements depending on target image
 package upgrade
+
+import (
+    "bufio"
+    "context"
+    "errors"
+    "fmt"
+    "io"
+    "log"
+    "os"
+    "os/exec"
+    "path/filepath"
+    "strings"
+    "sync"
+    "testing"
+    "time"
+
+    "github.com/bazelbuild/rules_go/go/runfiles"
+    "github.com/edgelesssys/constellation/v2/internal/api/attestationconfigapi"
+    "github.com/edgelesssys/constellation/v2/internal/config"
+    "github.com/edgelesssys/constellation/v2/internal/constants"
+    "github.com/edgelesssys/constellation/v2/internal/file"
+    "github.com/edgelesssys/constellation/v2/internal/imagefetcher"
+    "github.com/edgelesssys/constellation/v2/internal/semver"
+    "github.com/edgelesssys/constellation/v2/internal/versions"
+    "github.com/spf13/afero"
+    "github.com/stretchr/testify/require"
+    metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/client-go/kubernetes"
+)
+
+// tickDuration is the duration between two checks to see if the upgrade is successful.
+var tickDuration = 10 * time.Second // small tick duration to speed up tests
+
+// VersionContainer contains the versions that the cluster should be upgraded to.
+type VersionContainer struct {
+    ImageRef      string
+    Kubernetes    versions.ValidK8sVersion
+    Microservices semver.Semver
+}
+
+// AssertUpgradeSuccessful tests that the upgrade to the target version is successful.
+func AssertUpgradeSuccessful(t *testing.T, cli string, targetVersions VersionContainer, k *kubernetes.Clientset, wantControl, wantWorker int, timeout time.Duration) {
+    wg := queryStatusAsync(t, cli)
+    require.NotNil(t, k)
+
+    testMicroservicesEventuallyHaveVersion(t, targetVersions.Microservices, timeout)
+    log.Println("Microservices are upgraded.")
+
+    testNodesEventuallyHaveVersion(t, k, targetVersions, wantControl+wantWorker, timeout)
+    log.Println("Nodes are upgraded.")
+    wg.Wait()
+}
+
+func queryStatusAsync(t *testing.T, cli string) *sync.WaitGroup {
+    var wg sync.WaitGroup
+    wg.Add(1)
+    go func() {
+        defer wg.Done()
+        // The first control plane node should finish upgrading after 20 minutes. If it does not, something is fishy.
+        // Nodes can upgrade in <5mins.
+        testStatusEventuallyWorks(t, cli, 20*time.Minute)
+    }()
+
+    return &wg
+}
+
+func testStatusEventuallyWorks(t *testing.T, cli string, timeout time.Duration) {
+    require.Eventually(t, func() bool {
+        // Show versions set in cluster.
+        // The string after "Cluster status:" in the output might not be updated yet.
+        // This is only updated after the operator finishes one reconcile loop.
+        cmd := exec.CommandContext(context.Background(), cli, "status")
+        stdout, stderr, err := runCommandWithSeparateOutputs(cmd)
+        if err != nil {
+            log.Printf("Stdout: %s\nStderr: %s", string(stdout), string(stderr))
+            return false
+        }
+
+        log.Println(string(stdout))
+        return true
+    }, timeout, tickDuration)
+}
+
+func testMicroservicesEventuallyHaveVersion(t *testing.T, wantMicroserviceVersion semver.Semver, timeout time.Duration) {
+    require.Eventually(t, func() bool {
+        version, err := servicesVersion(t)
+        if err != nil {
+            log.Printf("Unable to fetch microservice version: %v\n", err)
+            return false
+        }
+
+        if version != wantMicroserviceVersion {
+            log.Printf("Microservices still at version %v, want %v\n", version, wantMicroserviceVersion)
+            return false
+        }
+
+        return true
+    }, timeout, tickDuration)
+}
+
+func testNodesEventuallyHaveVersion(t *testing.T, k *kubernetes.Clientset, targetVersions VersionContainer, totalNodeCount int, timeout time.Duration) {
+    require.Eventually(t, func() bool {
+        nodes, err := k.CoreV1().Nodes().List(context.Background(), metaV1.ListOptions{})
+        if err != nil {
+            log.Println(err)
+            return false
+        }
+
+        // require is not printed in the logs, so we use fmt
+        tooSmallNodeCount := len(nodes.Items) < totalNodeCount
+        if tooSmallNodeCount {
+            log.Printf("expected at least %v nodes, got %v", totalNodeCount, len(nodes.Items))
+            return false
+        }
+
+        allUpdated := true
+        log.Printf("Node status (%v):", time.Now())
+        for _, node := range nodes.Items {
+            for key, value := range node.Annotations {
+                if targetVersions.ImageRef != "" {
+                    if key == "constellation.edgeless.systems/node-image" {
+                        if !strings.EqualFold(value, targetVersions.ImageRef) {
+                            log.Printf("\t%s: Image %s, want %s\n", node.Name, value, targetVersions.ImageRef)
+                            fmt.Printf("\tP: %s: Image %s, want %s\n", node.Name, value, targetVersions.ImageRef)
+                            allUpdated = false
+                        }
+                    }
+                }
+            }
+            if targetVersions.Kubernetes != "" {
+                kubeletVersion := node.Status.NodeInfo.KubeletVersion
+                if kubeletVersion != string(targetVersions.Kubernetes) {
+                    log.Printf("\t%s: K8s (Kubelet) %s, want %s\n", node.Name, kubeletVersion, targetVersions.Kubernetes)
+                    allUpdated = false
+                }
+                kubeProxyVersion := node.Status.NodeInfo.KubeProxyVersion
+                if kubeProxyVersion != string(targetVersions.Kubernetes) {
+                    log.Printf("\t%s: K8s (Proxy) %s, want %s\n", node.Name, kubeProxyVersion, targetVersions.Kubernetes)
+                    allUpdated = false
+                }
+            }
+        }
+        return allUpdated
+    }, timeout, tickDuration)
+}
+
+// runCommandWithSeparateOutputs runs the given command while separating buffers for
+// stdout and stderr.
+func runCommandWithSeparateOutputs(cmd *exec.Cmd) (stdout, stderr []byte, err error) {
+    stdout = []byte{}
+    stderr = []byte{}
+
+    stdoutIn, err := cmd.StdoutPipe()
+    if err != nil {
+        err = fmt.Errorf("create stdout pipe: %w", err)
+        return
+    }
+    stderrIn, err := cmd.StderrPipe()
+    if err != nil {
+        err = fmt.Errorf("create stderr pipe: %w", err)
+        return
+    }
+
+    err = cmd.Start()
+    if err != nil {
+        err = fmt.Errorf("start command: %w", err)
+        return
+    }
+
+    continuouslyPrintOutput := func(r io.Reader, prefix string) {
+        scanner := bufio.NewScanner(r)
+        for scanner.Scan() {
+            output := scanner.Text()
+            fmt.Printf("%s: %s\n", prefix, output)
+            switch prefix {
+            case "stdout":
+                stdout = append(stdout, output...)
+            case "stderr":
+                stderr = append(stderr, output...)
+            }
+        }
+    }
+
+    go continuouslyPrintOutput(stdoutIn, "stdout")
+    go continuouslyPrintOutput(stderrIn, "stderr")
+
+    if err = cmd.Wait(); err != nil {
+        err = fmt.Errorf("wait for command to finish: %w", err)
+    }
+
+    return stdout, stderr, err
+}
+
+// Setup checks that the prerequisites for the test are met:
+// - a workspace is set
+// - a CLI path is set
+// - the constellation-upgrade folder does not exist.
+func Setup(workspace, cliPath string) error {
+    workingDir, err := workingDir(workspace)
+    if err != nil {
+        return fmt.Errorf("getting working directory: %w", err)
+    }
+
+    if err := os.Chdir(workingDir); err != nil {
+        return fmt.Errorf("changing working directory: %w", err)
+    }
+
+    if _, err := getCLIPath(cliPath); err != nil {
+        return fmt.Errorf("getting CLI path: %w", err)
+    }
+    return nil
+}
+
+// workingDir returns the path to the workspace.
+func workingDir(workspace string) (string, error) {
+    workingDir := os.Getenv("BUILD_WORKING_DIRECTORY")
+    switch {
+    case workingDir != "":
+        return workingDir, nil
+    case workspace != "":
+        return workspace, nil
+    default:
+        return "", errors.New("neither 'BUILD_WORKING_DIRECTORY' nor 'workspace' flag set")
+    }
+}
+
+// WriteUpgradeConfig writes the target versions to the config file.
+func WriteUpgradeConfig(require *require.Assertions, image string, kubernetes string, microservices string, configPath string) VersionContainer {
+    fileHandler := file.NewHandler(afero.NewOsFs())
+    attestationFetcher := attestationconfigapi.NewFetcher()
+    cfg, err := config.New(fileHandler, configPath, attestationFetcher, true)
+    var cfgErr *config.ValidationError
+    var longMsg string
+    if errors.As(err, &cfgErr) {
+        longMsg = cfgErr.LongMessage()
+    }
+    require.NoError(err, longMsg)
+
+    imageFetcher := imagefetcher.New()
+    imageRef, err := imageFetcher.FetchReference(
+        context.Background(),
+        cfg.GetProvider(),
+        cfg.GetAttestationConfig().GetVariant(),
+        image,
+        cfg.GetRegion(), cfg.UseMarketplaceImage(),
+    )
+    require.NoError(err)
+
+    log.Printf("Setting image version: %s\n", image)
+    cfg.Image = image
+
+    defaultConfig := config.Default()
+    var kubernetesVersion versions.ValidK8sVersion
+    if kubernetes == "" {
+        kubernetesVersion = defaultConfig.KubernetesVersion
+    } else {
+        kubernetesVersion = versions.ValidK8sVersion(kubernetes) // ignore validation because the config is only written to file
+    }
+
+    var microserviceVersion semver.Semver
+    if microservices == "" {
+        microserviceVersion = defaultConfig.MicroserviceVersion
+    } else {
+        version, err := semver.New(microservices)
+        require.NoError(err)
+        microserviceVersion = version
+    }
+
+    log.Printf("Setting K8s version: %s\n", kubernetesVersion)
+    cfg.KubernetesVersion = kubernetesVersion
+    log.Printf("Setting microservice version: %s\n", microserviceVersion)
+    cfg.MicroserviceVersion = microserviceVersion
+
+    err = fileHandler.WriteYAML(constants.ConfigFilename, cfg, file.OptOverwrite)
+    require.NoError(err)
+
+    return VersionContainer{ImageRef: imageRef, Kubernetes: kubernetesVersion, Microservices: microserviceVersion}
+}
+
+// getCLIPath returns the path to the CLI.
+func getCLIPath(cliPathFlag string) (string, error) {
+    pathCLI := os.Getenv("PATH_CLI")
+    var relCLIPath string
+    switch {
+    case pathCLI != "":
+        relCLIPath = pathCLI
+    case cliPathFlag != "":
+        relCLIPath = cliPathFlag
+    default:
+        return "", errors.New("neither 'PATH_CLI' nor 'cli' flag set")
+    }
+
+    // try to find the CLI in the working directory
+    // (e.g. when running via `go test` or when specifying a path manually)
+    workdir, err := os.Getwd()
+    if err != nil {
+        return "", fmt.Errorf("getting working directory: %w", err)
+    }
+
+    absCLIPath := relCLIPath
+    if !filepath.IsAbs(relCLIPath) {
+        absCLIPath = filepath.Join(workdir, relCLIPath)
+    }
+    if _, err := os.Stat(absCLIPath); err == nil {
+        return absCLIPath, nil
+    }
+
+    // fall back to runfiles (e.g. when running via bazel)
+    return runfiles.Rlocation(pathCLI)
+}
e2e/internal/upgrade/upgrade_test.go

@@ -9,31 +9,20 @@ SPDX-License-Identifier: AGPL-3.0-only
 package upgrade
 
 import (
-    "bufio"
     "context"
     "errors"
     "flag"
     "fmt"
-    "io"
     "log"
     "os"
     "os/exec"
-    "path/filepath"
     "strings"
-    "sync"
     "testing"
     "time"
 
-    "github.com/bazelbuild/rules_go/go/runfiles"
     "github.com/edgelesssys/constellation/v2/e2e/internal/kubectl"
-    "github.com/edgelesssys/constellation/v2/internal/api/attestationconfigapi"
-    "github.com/edgelesssys/constellation/v2/internal/config"
     "github.com/edgelesssys/constellation/v2/internal/constants"
-    "github.com/edgelesssys/constellation/v2/internal/file"
-    "github.com/edgelesssys/constellation/v2/internal/imagefetcher"
-    "github.com/edgelesssys/constellation/v2/internal/semver"
     "github.com/edgelesssys/constellation/v2/internal/versions"
-    "github.com/spf13/afero"
     "github.com/stretchr/testify/require"
     coreV1 "k8s.io/api/core/v1"
     metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -52,8 +41,8 @@ var (
     // When executing the test as a bazel target the CLI path is supplied through an env variable that bazel sets.
     // When executing via `go test` extra care should be taken that the supplied CLI is built on the same commit as this test.
     cliPath = flag.String("cli", "", "Constellation CLI to run the tests.")
-    wantWorker = flag.Int("want-worker", 0, "Number of wanted worker nodes.")
-    wantControl = flag.Int("want-control", 0, "Number of wanted control nodes.")
+    wantWorker = flag.Int("want-worker", 1, "Number of wanted worker nodes.")
+    wantControl = flag.Int("want-control", 1, "Number of wanted control nodes.")
     timeout = flag.Duration("timeout", 3*time.Hour, "Timeout after which the cluster should have converged to the target version.")
 )
@@ -63,7 +52,7 @@ var (
 func TestUpgrade(t *testing.T) {
     require := require.New(t)
 
-    err := setup()
+    err := Setup(*workspace, *cliPath)
     require.NoError(err)
 
     k, err := kubectl.New()
@@ -79,7 +68,7 @@ func TestUpgrade(t *testing.T) {
     cli, err := getCLIPath(*cliPath)
     require.NoError(err)
 
-    targetVersions := writeUpgradeConfig(require, *targetImage, *targetKubernetes, *targetMicroservices)
+    targetVersions := WriteUpgradeConfig(require, *targetImage, *targetKubernetes, *targetMicroservices, constants.ConfigFilename)
 
     log.Println("Fetching measurements for new image.")
     cmd := exec.CommandContext(context.Background(), cli, "config", "fetch-measurements", "--insecure", "--debug")
@@ -97,77 +86,7 @@ func TestUpgrade(t *testing.T) {
     log.Println("Triggering upgrade.")
     runUpgradeApply(require, cli)
 
-    wg := queryStatusAsync(t, cli)
-
-    testMicroservicesEventuallyHaveVersion(t, targetVersions.microservices, *timeout)
-    testNodesEventuallyHaveVersion(t, k, targetVersions, *wantControl+*wantWorker, *timeout)
-
-    wg.Wait()
-}
-
-// setup checks that the prerequisites for the test are met:
-// - a workspace is set
-// - a CLI path is set
-// - the constellation-upgrade folder does not exist.
-func setup() error {
-    workingDir, err := workingDir(*workspace)
-    if err != nil {
-        return fmt.Errorf("getting working directory: %w", err)
-    }
-
-    if err := os.Chdir(workingDir); err != nil {
-        return fmt.Errorf("changing working directory: %w", err)
-    }
-
-    if _, err := getCLIPath(*cliPath); err != nil {
-        return fmt.Errorf("getting CLI path: %w", err)
-    }
-    return nil
-}
-
-// workingDir returns the path to the workspace.
-func workingDir(workspace string) (string, error) {
-    workingDir := os.Getenv("BUILD_WORKING_DIRECTORY")
-    switch {
-    case workingDir != "":
-        return workingDir, nil
-    case workspace != "":
-        return workspace, nil
-    default:
-        return "", errors.New("neither 'BUILD_WORKING_DIRECTORY' nor 'workspace' flag set")
-    }
-}
-
-// getCLIPath returns the path to the CLI.
-func getCLIPath(cliPathFlag string) (string, error) {
-    pathCLI := os.Getenv("PATH_CLI")
-    var relCLIPath string
-    switch {
-    case pathCLI != "":
-        relCLIPath = pathCLI
-    case cliPathFlag != "":
-        relCLIPath = cliPathFlag
-    default:
-        return "", errors.New("neither 'PATH_CLI' nor 'cli' flag set")
-    }
-
-    // try to find the CLI in the working directory
-    // (e.g. when running via `go test` or when specifying a path manually)
-    workdir, err := os.Getwd()
-    if err != nil {
-        return "", fmt.Errorf("getting working directory: %w", err)
-    }
-
-    absCLIPath := relCLIPath
-    if !filepath.IsAbs(relCLIPath) {
-        absCLIPath = filepath.Join(workdir, relCLIPath)
-    }
-    if _, err := os.Stat(absCLIPath); err == nil {
-        return absCLIPath, nil
-    }
-
-    // fall back to runfiles (e.g. when running via bazel)
-    return runfiles.Rlocation(pathCLI)
-}
+    AssertUpgradeSuccessful(t, cli, targetVersions, k, *wantControl, *wantWorker, *timeout)
 }
 
 // testPodsEventuallyReady checks that:
@@ -249,58 +168,6 @@ func testNodesEventuallyAvailable(t *testing.T, k *kubernetes.Clientset, wantCon
     }, time.Minute*30, time.Minute)
 }
 
-func writeUpgradeConfig(require *require.Assertions, image string, kubernetes string, microservices string) versionContainer {
-    fileHandler := file.NewHandler(afero.NewOsFs())
-    attestationFetcher := attestationconfigapi.NewFetcher()
-    cfg, err := config.New(fileHandler, constants.ConfigFilename, attestationFetcher, true)
-    var cfgErr *config.ValidationError
-    var longMsg string
-    if errors.As(err, &cfgErr) {
-        longMsg = cfgErr.LongMessage()
-    }
-    require.NoError(err, longMsg)
-
-    imageFetcher := imagefetcher.New()
-    imageRef, err := imageFetcher.FetchReference(
-        context.Background(),
-        cfg.GetProvider(),
-        cfg.GetAttestationConfig().GetVariant(),
-        image,
-        cfg.GetRegion(), cfg.UseMarketplaceImage(),
-    )
-    require.NoError(err)
-
-    log.Printf("Setting image version: %s\n", image)
-    cfg.Image = image
-
-    defaultConfig := config.Default()
-    var kubernetesVersion versions.ValidK8sVersion
-    if kubernetes == "" {
-        kubernetesVersion = defaultConfig.KubernetesVersion
-    } else {
-        kubernetesVersion = versions.ValidK8sVersion(kubernetes) // ignore validation because the config is only written to file
-    }
-
-    var microserviceVersion semver.Semver
-    if microservices == "" {
-        microserviceVersion = defaultConfig.MicroserviceVersion
-    } else {
-        version, err := semver.New(microservices)
-        require.NoError(err)
-        microserviceVersion = version
-    }
-
-    log.Printf("Setting K8s version: %s\n", kubernetesVersion)
-    cfg.KubernetesVersion = kubernetesVersion
-    log.Printf("Setting microservice version: %s\n", microserviceVersion)
-    cfg.MicroserviceVersion = microserviceVersion
-
-    err = fileHandler.WriteYAML(constants.ConfigFilename, cfg, file.OptOverwrite)
-    require.NoError(err)
-
-    return versionContainer{imageRef: imageRef, kubernetes: kubernetesVersion, microservices: microserviceVersion}
-}
-
 // runUpgradeCheck executes 'upgrade check' and does basic checks on the output.
 // We can not check images upgrades because we might use unpublished images. CLI uses public CDN to check for available images.
 func runUpgradeCheck(require *require.Assertions, cli, targetKubernetes string) {
@@ -361,140 +228,3 @@ func containsUnexepectedMsg(input string) error {
     }
     return nil
 }
-
-func queryStatusAsync(t *testing.T, cli string) *sync.WaitGroup {
-    var wg sync.WaitGroup
-    wg.Add(1)
-    go func() {
-        defer wg.Done()
-        // The first control plane node should finish upgrading after 20 minutes. If it does not, something is fishy.
-        // Nodes can upgrade in <5mins.
-        testStatusEventuallyWorks(t, cli, 20*time.Minute)
-    }()
-
-    return &wg
-}
-
-func testStatusEventuallyWorks(t *testing.T, cli string, timeout time.Duration) {
-    require.Eventually(t, func() bool {
-        // Show versions set in cluster.
-        // The string after "Cluster status:" in the output might not be updated yet.
-        // This is only updated after the operator finishes one reconcile loop.
-        cmd := exec.CommandContext(context.Background(), cli, "status")
-        stdout, stderr, err := runCommandWithSeparateOutputs(cmd)
-        if err != nil {
-            log.Printf("Stdout: %s\nStderr: %s", string(stdout), string(stderr))
-            return false
-        }
-
-        log.Println(string(stdout))
-        return true
-    }, timeout, time.Minute)
-}
-
-func testMicroservicesEventuallyHaveVersion(t *testing.T, wantMicroserviceVersion semver.Semver, timeout time.Duration) {
-    require.Eventually(t, func() bool {
-        version, err := servicesVersion(t)
-        if err != nil {
-            log.Printf("Unable to fetch microservice version: %v\n", err)
-            return false
-        }
-
-        if version != wantMicroserviceVersion {
-            log.Printf("Microservices still at version %v, want %v\n", version, wantMicroserviceVersion)
-            return false
-        }
-
-        return true
-    }, timeout, time.Minute)
-}
-
-func testNodesEventuallyHaveVersion(t *testing.T, k *kubernetes.Clientset, targetVersions versionContainer, totalNodeCount int, timeout time.Duration) {
-    require.Eventually(t, func() bool {
-        nodes, err := k.CoreV1().Nodes().List(context.Background(), metaV1.ListOptions{})
-        if err != nil {
-            log.Println(err)
-            return false
-        }
-        require.False(t, len(nodes.Items) < totalNodeCount, "expected at least %v nodes, got %v", totalNodeCount, len(nodes.Items))
-
-        allUpdated := true
-        log.Printf("Node status (%v):", time.Now())
-        for _, node := range nodes.Items {
-            for key, value := range node.Annotations {
-                if key == "constellation.edgeless.systems/node-image" {
-                    if !strings.EqualFold(value, targetVersions.imageRef) {
-                        log.Printf("\t%s: Image %s, want %s\n", node.Name, value, targetVersions.imageRef)
-                        allUpdated = false
-                    }
-                }
-            }
-
-            kubeletVersion := node.Status.NodeInfo.KubeletVersion
-            if kubeletVersion != string(targetVersions.kubernetes) {
-                log.Printf("\t%s: K8s (Kubelet) %s, want %s\n", node.Name, kubeletVersion, targetVersions.kubernetes)
-                allUpdated = false
-            }
-            kubeProxyVersion := node.Status.NodeInfo.KubeProxyVersion
-            if kubeProxyVersion != string(targetVersions.kubernetes) {
-                log.Printf("\t%s: K8s (Proxy) %s, want %s\n", node.Name, kubeProxyVersion, targetVersions.kubernetes)
-                allUpdated = false
-            }
-        }
-
-        return allUpdated
-    }, timeout, time.Minute)
-}
-
-type versionContainer struct {
-    imageRef      string
-    kubernetes    versions.ValidK8sVersion
-    microservices semver.Semver
-}
-
-// runCommandWithSeparateOutputs runs the given command while separating buffers for
-// stdout and stderr.
-func runCommandWithSeparateOutputs(cmd *exec.Cmd) (stdout, stderr []byte, err error) {
-    stdout = []byte{}
-    stderr = []byte{}
-
-    stdoutIn, err := cmd.StdoutPipe()
-    if err != nil {
-        err = fmt.Errorf("create stdout pipe: %w", err)
-        return
-    }
-    stderrIn, err := cmd.StderrPipe()
-    if err != nil {
-        err = fmt.Errorf("create stderr pipe: %w", err)
-        return
-    }
-
-    err = cmd.Start()
-    if err != nil {
-        err = fmt.Errorf("start command: %w", err)
-        return
-    }
-
-    continuouslyPrintOutput := func(r io.Reader, prefix string) {
-        scanner := bufio.NewScanner(r)
-        for scanner.Scan() {
-            output := scanner.Text()
-            fmt.Printf("%s: %s\n", prefix, output)
-            switch prefix {
-            case "stdout":
-                stdout = append(stdout, output...)
-            case "stderr":
-                stderr = append(stderr, output...)
-            }
-        }
-    }
-
-    go continuouslyPrintOutput(stdoutIn, "stdout")
-    go continuouslyPrintOutput(stderrIn, "stderr")
-
-    if err = cmd.Wait(); err != nil {
-        err = fmt.Errorf("wait for command to finish: %w", err)
-    }
-
-    return stdout, stderr, err
-}
e2e/provider-upgrade/BUILD.bazel (new file, 15 lines)

@@ -0,0 +1,15 @@
+load("//bazel/go:go_test.bzl", "go_test")
+
+go_test(
+    name = "provider-upgrade_test",
+    srcs = ["upgrade_test.go"],
+    # keep
+    gotags = ["e2e"],
+    tags = ["manual"],
+    deps = [
+        "//e2e/internal/kubectl",
+        "//e2e/internal/upgrade",
+        "//internal/constants",
+        "@com_github_stretchr_testify//require",
+    ],
+)
e2e/provider-upgrade/upgrade_test.go (new file, 60 lines)

@@ -0,0 +1,60 @@
+//go:build e2e
+
+/*
+Copyright (c) Edgeless Systems GmbH
+
+SPDX-License-Identifier: AGPL-3.0-only
+*/
+
+// End-to-end test that is used by the e2e Terraform provider test.
+package main
+
+import (
+    "flag"
+    "os"
+    "path/filepath"
+    "testing"
+    "time"
+
+    "github.com/edgelesssys/constellation/v2/e2e/internal/kubectl"
+    "github.com/edgelesssys/constellation/v2/e2e/internal/upgrade"
+    "github.com/edgelesssys/constellation/v2/internal/constants"
+    "github.com/stretchr/testify/require"
+)
+
+var (
+    targetImage         = flag.String("target-image", "", "Image (shortversion) to upgrade to.")
+    targetKubernetes    = flag.String("target-kubernetes", "", "Kubernetes version (MAJOR.MINOR.PATCH) to upgrade to. Defaults to default version of target CLI.")
+    targetMicroservices = flag.String("target-microservices", "", "Microservice version (MAJOR.MINOR.PATCH) to upgrade to. Defaults to default version of target CLI.")
+    // When executing the test as a bazel target the CLI path is supplied through an env variable that bazel sets.
+    // When executing via `go test` extra care should be taken that the supplied CLI is built on the same commit as this test.
+    // When executing the test as a bazel target the workspace path is supplied through an env variable that bazel sets.
+    workspace   = flag.String("workspace", "", "Constellation workspace in which to run the tests.")
+    cliPath     = flag.String("cli", "", "Constellation CLI to run the tests.")
+    wantWorker  = flag.Int("want-worker", 0, "Number of wanted worker nodes.")
+    wantControl = flag.Int("want-control", 0, "Number of wanted control nodes.")
+    timeout     = flag.Duration("timeout", 90*time.Minute, "Timeout after which the cluster should have converged to the target version.")
+)
+
+// TestUpgradeSuccessful tests that the upgrade to the target version is successful.
+func TestUpgradeSuccessful(t *testing.T) {
+    require := require.New(t)
+    kubeconfigPath := os.Getenv("KUBECONFIG")
+    require.NotEmpty(kubeconfigPath, "KUBECONFIG environment variable must be set")
+    dir := filepath.Dir(kubeconfigPath)
+    configPath := filepath.Join(dir, constants.ConfigFilename)
+
+    // only done here to construct the version struct
+    require.NotEqual(*targetImage, "", "--target-image needs to be specified")
+    v := upgrade.WriteUpgradeConfig(require, *targetImage, *targetKubernetes, *targetMicroservices, configPath)
+    // ignore Kubernetes check if targetKubernetes is not set; Kubernetes is only explicitly upgraded
+    if *targetKubernetes == "" {
+        v.Kubernetes = ""
+    }
+    k, err := kubectl.New()
+    require.NoError(err)
+
+    err = upgrade.Setup(*workspace, *cliPath)
+    require.NoError(err)
+    upgrade.AssertUpgradeSuccessful(t, *cliPath, v, k, *wantControl, *wantWorker, *timeout)
+}
Terraform provider example (AWS):

@@ -13,13 +13,15 @@ terraform {
 locals {
   name = "constell"
-  version = "vX.Y.Z"
+  image_version = "vX.Y.Z"
   kubernetes_version = "vX.Y.Z"
   microservice_version = "vX.Y.Z"
   csp = "aws"
   attestation_variant = "aws-sev-snp"
   region = "us-east-2"
   zone = "us-east-2c"
+  control_plane_count = 3
+  worker_count = 2
 
   master_secret = random_bytes.master_secret.hex
   master_secret_salt = random_bytes.master_secret_salt.hex
@@ -55,7 +57,7 @@ module "aws_infrastructure" {
     instance_type = "m6a.xlarge"
     disk_size = 30
     disk_type = "gp3"
-    initial_count = 3
+    initial_count = local.control_plane_count
     zone = local.zone
   },
   worker_default = {
@@ -63,7 +65,7 @@ module "aws_infrastructure" {
     instance_type = "m6a.xlarge"
     disk_size = 30
     disk_type = "gp3"
-    initial_count = 2
+    initial_count = local.worker_count
     zone = local.zone
   }
 }
@@ -87,7 +89,7 @@ data "constellation_attestation" "foo" {
 data "constellation_image" "bar" {
   csp = local.csp
   attestation_variant = local.attestation_variant
-  version = local.version
+  version = local.image_version
   region = local.region
 }
Terraform provider example (Azure):

@@ -13,12 +13,14 @@ terraform {
 locals {
   name = "constell"
-  version = "vX.Y.Z"
+  image_version = "vX.Y.Z"
   kubernetes_version = "vX.Y.Z"
   microservice_version = "vX.Y.Z"
   csp = "azure"
   attestation_variant = "azure-sev-snp"
   location = "northeurope"
+  control_plane_count = 3
+  worker_count = 2
 
   master_secret = random_bytes.master_secret.hex
   master_secret_salt = random_bytes.master_secret_salt.hex
@@ -56,14 +58,14 @@ module "azure_infrastructure" {
     instance_type = "Standard_DC4as_v5"
     disk_size = 30
     disk_type = "Premium_LRS"
-    initial_count = 3
+    initial_count = local.control_plane_count
   },
   worker_default = {
     role = "worker"
     instance_type = "Standard_DC4as_v5"
     disk_size = 30
     disk_type = "Premium_LRS"
-    initial_count = 2
+    initial_count = local.worker_count
   }
 }
 location = local.location
@@ -83,7 +85,7 @@ data "constellation_attestation" "foo" {
 data "constellation_image" "bar" {
   csp = local.csp
   attestation_variant = local.attestation_variant
-  version = local.version
+  version = local.image_version
 }
 
 resource "constellation_cluster" "azure_example" {
Terraform provider example (GCP):

@@ -13,7 +13,7 @@ terraform {
 locals {
   name = "constell"
-  version = "vX.Y.Z"
+  image_version = "vX.Y.Z"
   kubernetes_version = "vX.Y.Z"
   microservice_version = "vX.Y.Z"
   csp = "gcp"
@@ -21,6 +21,8 @@ locals {
   region = "europe-west3"
   zone = "europe-west3-b"
   project_id = "constellation-331613"
+  control_plane_count = 3
+  worker_count = 2
 
   master_secret = random_bytes.master_secret.hex
   master_secret_salt = random_bytes.master_secret_salt.hex
@@ -58,7 +60,7 @@ module "gcp_infrastructure" {
     instance_type = "n2d-standard-4"
     disk_size = 30
     disk_type = "pd-ssd"
-    initial_count = 3
+    initial_count = local.control_plane_count
     zone = local.zone
   },
   worker_default = {
@@ -66,7 +68,7 @@ module "gcp_infrastructure" {
     instance_type = "n2d-standard-4"
     disk_size = 30
     disk_type = "pd-ssd"
-    initial_count = 2
+    initial_count = local.worker_count
     zone = local.zone
   }
 }
@@ -87,7 +89,7 @@ data "constellation_attestation" "foo" {
 data "constellation_image" "bar" {
   csp = local.csp
   attestation_variant = local.attestation_variant
-  version = local.version
+  version = local.image_version
 }
 
 resource "constellation_cluster" "gcp_example" {