mirror of
https://github.com/edgelesssys/constellation.git
synced 2025-06-02 21:23:17 -04:00
implement e2e test lb (#815)
* implement e2e test lb * add lb e2e test to weekly schedule Signed-off-by: Fabian Kammel <fk@edgeless.systems>
This commit is contained in:
parent
f1bee6dab8
commit
83f09e1058
9 changed files with 292 additions and 4 deletions
31
.github/actions/e2e_lb/action.yml
vendored
Normal file
31
.github/actions/e2e_lb/action.yml
vendored
Normal file
|
@ -0,0 +1,31 @@
|
||||||
|
name: E2E load balancer test
|
||||||
|
description: "Test load balancer functionality."
|
||||||
|
|
||||||
|
inputs:
|
||||||
|
kubeconfig:
|
||||||
|
description: "The kubeconfig of the cluster to test."
|
||||||
|
required: true
|
||||||
|
|
||||||
|
runs:
|
||||||
|
using: "composite"
|
||||||
|
steps:
|
||||||
|
# This action assumes that the cluster is in a ready state, with all nodes joined and ready.
|
||||||
|
- name: Create deployments
|
||||||
|
shell: bash
|
||||||
|
env:
|
||||||
|
KUBECONFIG: ${{ inputs.kubeconfig }}
|
||||||
|
working-directory: ./.github/actions/e2e_lb
|
||||||
|
run: |
|
||||||
|
kubectl apply -f ns.yml
|
||||||
|
kubectl apply -f lb.yml
|
||||||
|
go test ../../../e2e/internal/lb/lb_test.go -v
|
||||||
|
|
||||||
|
- name: Delete deployment
|
||||||
|
if: always()
|
||||||
|
shell: bash
|
||||||
|
env:
|
||||||
|
KUBECONFIG: ${{ inputs.kubeconfig }}
|
||||||
|
working-directory: ./.github/actions/e2e_lb
|
||||||
|
run: |
|
||||||
|
kubectl delete -f lb.yml
|
||||||
|
kubectl delete -f ns.yml
|
37
.github/actions/e2e_lb/lb.yml
vendored
Normal file
37
.github/actions/e2e_lb/lb.yml
vendored
Normal file
|
@ -0,0 +1,37 @@
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Service
|
||||||
|
metadata:
|
||||||
|
name: whoami
|
||||||
|
namespace: lb-test
|
||||||
|
spec:
|
||||||
|
selector:
|
||||||
|
app: whoami
|
||||||
|
ports:
|
||||||
|
- port: 8080
|
||||||
|
targetPort: 80
|
||||||
|
type: LoadBalancer
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
apiVersion: apps/v1
|
||||||
|
kind: Deployment
|
||||||
|
metadata:
|
||||||
|
name: whoami
|
||||||
|
namespace: lb-test
|
||||||
|
labels:
|
||||||
|
app: whoami
|
||||||
|
spec:
|
||||||
|
replicas: 3
|
||||||
|
selector:
|
||||||
|
matchLabels:
|
||||||
|
app: whoami
|
||||||
|
template:
|
||||||
|
metadata:
|
||||||
|
labels:
|
||||||
|
app: whoami
|
||||||
|
spec:
|
||||||
|
containers:
|
||||||
|
- name: whoami
|
||||||
|
image: traefik/whoami:v1.8.7
|
||||||
|
ports:
|
||||||
|
- containerPort: 80
|
4
.github/actions/e2e_lb/ns.yml
vendored
Normal file
4
.github/actions/e2e_lb/ns.yml
vendored
Normal file
|
@ -0,0 +1,4 @@
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Namespace
|
||||||
|
metadata:
|
||||||
|
name: lb-test
|
10
.github/actions/e2e_test/action.yml
vendored
10
.github/actions/e2e_test/action.yml
vendored
|
@ -57,7 +57,7 @@ inputs:
|
||||||
description: "The resource group to use"
|
description: "The resource group to use"
|
||||||
required: false
|
required: false
|
||||||
test:
|
test:
|
||||||
description: "The test to run. Can currently be one of [sonobuoy full, sonobuoy quick, autoscaling, k-bench, nop]."
|
description: "The test to run. Can currently be one of [sonobuoy full, sonobuoy quick, autoscaling, lb, k-bench, nop]."
|
||||||
required: true
|
required: true
|
||||||
sonobuoyTestSuiteCmd:
|
sonobuoyTestSuiteCmd:
|
||||||
description: "The sonobuoy test suite to run."
|
description: "The sonobuoy test suite to run."
|
||||||
|
@ -72,7 +72,7 @@ runs:
|
||||||
using: "composite"
|
using: "composite"
|
||||||
steps:
|
steps:
|
||||||
- name: Check input
|
- name: Check input
|
||||||
if: ${{ !contains(fromJson('["sonobuoy full", "sonobuoy quick", "autoscaling", "k-bench", "nop"]'), inputs.test) }}
|
if: ${{ !contains(fromJson('["sonobuoy full", "sonobuoy quick", "autoscaling", "k-bench", "lb", "nop"]'), inputs.test) }}
|
||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
echo "Invalid input for test field: ${{ inputs.test }}"
|
echo "Invalid input for test field: ${{ inputs.test }}"
|
||||||
|
@ -193,6 +193,12 @@ runs:
|
||||||
with:
|
with:
|
||||||
kubeconfig: ${{ steps.constellation-create.outputs.kubeconfig }}
|
kubeconfig: ${{ steps.constellation-create.outputs.kubeconfig }}
|
||||||
|
|
||||||
|
- name: Run lb test
|
||||||
|
if: inputs.test == 'lb'
|
||||||
|
uses: ./.github/actions/e2e_lb
|
||||||
|
with:
|
||||||
|
kubeconfig: ${{ steps.constellation-create.outputs.kubeconfig }}
|
||||||
|
|
||||||
- name: Run K-Bench
|
- name: Run K-Bench
|
||||||
if: inputs.test == 'k-bench'
|
if: inputs.test == 'k-bench'
|
||||||
uses: ./.github/actions/e2e_kbench
|
uses: ./.github/actions/e2e_kbench
|
||||||
|
|
1
.github/workflows/e2e-test-manual-macos.yml
vendored
1
.github/workflows/e2e-test-manual-macos.yml
vendored
|
@ -27,6 +27,7 @@ on:
|
||||||
- "sonobuoy quick"
|
- "sonobuoy quick"
|
||||||
- "sonobuoy full"
|
- "sonobuoy full"
|
||||||
- "autoscaling"
|
- "autoscaling"
|
||||||
|
- "lb"
|
||||||
- "k-bench"
|
- "k-bench"
|
||||||
- "nop"
|
- "nop"
|
||||||
required: true
|
required: true
|
||||||
|
|
1
.github/workflows/e2e-test-manual.yml
vendored
1
.github/workflows/e2e-test-manual.yml
vendored
|
@ -27,6 +27,7 @@ on:
|
||||||
- "sonobuoy quick"
|
- "sonobuoy quick"
|
||||||
- "sonobuoy full"
|
- "sonobuoy full"
|
||||||
- "autoscaling"
|
- "autoscaling"
|
||||||
|
- "lb"
|
||||||
- "k-bench"
|
- "k-bench"
|
||||||
- "nop"
|
- "nop"
|
||||||
required: true
|
required: true
|
||||||
|
|
11
.github/workflows/e2e-test-weekly.yml
vendored
11
.github/workflows/e2e-test-weekly.yml
vendored
|
@ -38,7 +38,7 @@ jobs:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
max-parallel: 5
|
max-parallel: 5
|
||||||
matrix:
|
matrix:
|
||||||
test: ["sonobuoy full", "autoscaling", "k-bench"]
|
test: ["sonobuoy full", "autoscaling", "k-bench", "lb"]
|
||||||
provider: ["gcp", "azure", "aws"]
|
provider: ["gcp", "azure", "aws"]
|
||||||
version: ["1.23", "1.24", "1.25", "1.26"]
|
version: ["1.23", "1.24", "1.25", "1.26"]
|
||||||
exclude:
|
exclude:
|
||||||
|
@ -54,7 +54,14 @@ jobs:
|
||||||
version: "1.23"
|
version: "1.23"
|
||||||
- test: "k-bench"
|
- test: "k-bench"
|
||||||
version: "1.24"
|
version: "1.24"
|
||||||
- test: "autoscaling"
|
- test: "k-bench"
|
||||||
|
version: "1.25"
|
||||||
|
# lb test runs only on latest version.
|
||||||
|
- test: "lb"
|
||||||
|
version: "1.23"
|
||||||
|
- test: "lb"
|
||||||
|
version: "1.24"
|
||||||
|
- test: "lb"
|
||||||
version: "1.25"
|
version: "1.25"
|
||||||
# Currently not supported on AWS.
|
# Currently not supported on AWS.
|
||||||
- test: "autoscaling"
|
- test: "autoscaling"
|
||||||
|
|
40
e2e/internal/kubectl/kubectl.go
Normal file
40
e2e/internal/kubectl/kubectl.go
Normal file
|
@ -0,0 +1,40 @@
|
||||||
|
/*
|
||||||
|
Copyright (c) Edgeless Systems GmbH
|
||||||
|
|
||||||
|
SPDX-License-Identifier: AGPL-3.0-only
|
||||||
|
*/
|
||||||
|
|
||||||
|
package kubectl
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"k8s.io/client-go/kubernetes"
|
||||||
|
"k8s.io/client-go/tools/clientcmd"
|
||||||
|
)
|
||||||
|
|
||||||
|
// New creates a new k8s client. The kube config file is expected to be set
|
||||||
|
// via environment variable KUBECONFIG or located at ./constellation-admin.conf.
|
||||||
|
func New() (*kubernetes.Clientset, error) {
|
||||||
|
cfgPath := ""
|
||||||
|
if envPath := os.Getenv("KUBECONFIG"); envPath != "" {
|
||||||
|
cfgPath = envPath
|
||||||
|
fmt.Printf("K8s config path empty. Using environment variable %s=%s.\n", "KUBECONFIG", envPath)
|
||||||
|
} else {
|
||||||
|
cfgPath = "constellation-admin.conf"
|
||||||
|
fmt.Printf("K8s config path empty. Assuming '%s'.\n", cfgPath)
|
||||||
|
}
|
||||||
|
|
||||||
|
kubeConfig, err := clientcmd.BuildConfigFromFlags("", cfgPath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
k8sClient, err := kubernetes.NewForConfig(kubeConfig)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return k8sClient, nil
|
||||||
|
}
|
161
e2e/internal/lb/lb_test.go
Normal file
161
e2e/internal/lb/lb_test.go
Normal file
|
@ -0,0 +1,161 @@
|
||||||
|
//go:build e2elb
|
||||||
|
|
||||||
|
/*
|
||||||
|
Copyright (c) Edgeless Systems GmbH
|
||||||
|
|
||||||
|
SPDX-License-Identifier: AGPL-3.0-only
|
||||||
|
*/
|
||||||
|
|
||||||
|
package test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/edgelesssys/constellation/v2/e2e/internal/kubectl"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
coreV1 "k8s.io/api/core/v1"
|
||||||
|
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
"k8s.io/client-go/kubernetes"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
namespaceName = "lb-test"
|
||||||
|
serviceName = "whoami"
|
||||||
|
initialPort = int32(8080)
|
||||||
|
newPort = int32(8044)
|
||||||
|
numRequests = 256
|
||||||
|
numPods = 3
|
||||||
|
timeout = time.Minute * 5
|
||||||
|
interval = time.Second * 5
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestLoadBalancer verifies the external load balancer of the whoami Service:
// it waits for an external IP, checks that requests are spread across all
// numPods backends, then changes the Service port and repeats the check.
// It assumes the ns.yml/lb.yml manifests were applied beforehand (see the
// e2e_lb GitHub action) and reads the cluster config via kubectl.New.
func TestLoadBalancer(t *testing.T) {
	assert := assert.New(t)
	require := require.New(t)

	k, err := kubectl.New()
	require.NoError(err)

	// Wait for external IP to be registered
	svc := testEventuallyExternalIPAvailable(t, k)
	loadBalancerIP := svc.Status.LoadBalancer.Ingress[0].IP
	loadBalancerPort := svc.Spec.Ports[0].Port
	require.Equal(initialPort, loadBalancerPort)
	url := buildURL(t, loadBalancerIP, loadBalancerPort)
	testEventuallyStatusOK(t, url)

	// Check that all pods receive traffic: every request records the
	// responding pod's hostname, and we expect exactly numPods unique ones.
	var allHostnames []string
	for i := 0; i < numRequests; i++ {
		allHostnames = testEndpointAvailable(t, url, allHostnames)
	}
	assert.True(hasNUniqueStrings(allHostnames, numPods))
	// Reset (keeping capacity) for the second round of requests below.
	allHostnames = allHostnames[:0]

	// Change port to 8044
	// NOTE(review): this Update reuses the object from the earlier Get; it
	// could fail on a resourceVersion conflict if the Service changed in
	// between — presumably acceptable for an e2e test, but worth confirming.
	svc.Spec.Ports[0].Port = newPort
	svc, err = k.CoreV1().Services(namespaceName).Update(context.Background(), svc, v1.UpdateOptions{})
	require.NoError(err)
	assert.Equal(newPort, svc.Spec.Ports[0].Port)

	// Wait for changed port to be available
	newURL := buildURL(t, loadBalancerIP, newPort)
	testEventuallyStatusOK(t, newURL)

	// Check again that all pods receive traffic
	for i := 0; i < numRequests; i++ {
		allHostnames = testEndpointAvailable(t, newURL, allHostnames)
	}
	assert.True(hasNUniqueStrings(allHostnames, numPods))
}
|
||||||
|
|
||||||
|
// hasNUniqueStrings reports whether elements contains exactly n distinct
// values. An empty (or nil) slice has 0 distinct values.
func hasNUniqueStrings(elements []string, n int) bool {
	// Collect the distinct values in a set; struct{} values take no storage.
	unique := make(map[string]struct{}, n)
	for _, e := range elements {
		unique[e] = struct{}{}
	}
	// len on a map is the key count directly — no counting loop needed.
	return len(unique) == n
}
|
||||||
|
|
||||||
|
// buildURL assembles the plain-HTTP endpoint URL for the given ip and port.
func buildURL(t *testing.T, ip string, port int32) string {
	t.Helper()
	endpoint := "http://" + ip + ":" + fmt.Sprint(port)
	return endpoint
}
|
||||||
|
|
||||||
|
// testEventuallyStatusOK tests that the URL response with StatusOK within 5min.
|
||||||
|
func testEventuallyStatusOK(t *testing.T, url string) {
|
||||||
|
assert := assert.New(t)
|
||||||
|
require := require.New(t)
|
||||||
|
|
||||||
|
assert.Eventually(func() bool {
|
||||||
|
req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, url, http.NoBody)
|
||||||
|
require.NoError(err)
|
||||||
|
|
||||||
|
resp, err := http.DefaultClient.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
return resp.StatusCode == http.StatusOK
|
||||||
|
}, timeout, interval)
|
||||||
|
}
|
||||||
|
|
||||||
|
// testEventuallyExternalIPAvailable uses k to query if the whoami service is available
|
||||||
|
// within 5 minutes. Once the service is available the Service is returned.
|
||||||
|
func testEventuallyExternalIPAvailable(t *testing.T, k *kubernetes.Clientset) *coreV1.Service {
|
||||||
|
assert := assert.New(t)
|
||||||
|
require := require.New(t)
|
||||||
|
var svc *coreV1.Service
|
||||||
|
|
||||||
|
assert.Eventually(func() bool {
|
||||||
|
var err error
|
||||||
|
svc, err = k.CoreV1().Services(namespaceName).Get(context.Background(), serviceName, v1.GetOptions{})
|
||||||
|
require.NoError(err)
|
||||||
|
return len(svc.Status.LoadBalancer.Ingress) > 0
|
||||||
|
}, timeout, interval)
|
||||||
|
|
||||||
|
return svc
|
||||||
|
}
|
||||||
|
|
||||||
|
// testEndpointAvailable GETs the provided URL. It expects a payload from
|
||||||
|
// traefik/whoami service and checks that the first body line is of form
|
||||||
|
// Hostname: <pod-name>
|
||||||
|
// If this works the <pod-name> value is appended to allHostnames slice and
|
||||||
|
// new allHostnames is returned.
|
||||||
|
func testEndpointAvailable(t *testing.T, url string, allHostnames []string) []string {
|
||||||
|
assert := assert.New(t)
|
||||||
|
require := require.New(t)
|
||||||
|
|
||||||
|
req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, url, http.NoBody)
|
||||||
|
require.NoError(err)
|
||||||
|
|
||||||
|
resp, err := http.DefaultClient.Do(req)
|
||||||
|
require.NoError(err)
|
||||||
|
defer resp.Body.Close()
|
||||||
|
assert.Equal(http.StatusOK, resp.StatusCode)
|
||||||
|
// Force close of connections so that we see different backends
|
||||||
|
http.DefaultClient.CloseIdleConnections()
|
||||||
|
|
||||||
|
firstLine, err := bufio.NewReader(resp.Body).ReadString('\n')
|
||||||
|
require.NoError(err)
|
||||||
|
parts := strings.Split(firstLine, ": ")
|
||||||
|
hostnameKey := parts[0]
|
||||||
|
hostnameValue := parts[1]
|
||||||
|
|
||||||
|
assert.Equal("Hostname", hostnameKey)
|
||||||
|
require.NotEmpty(hostnameValue)
|
||||||
|
|
||||||
|
return append(allHostnames, hostnameValue)
|
||||||
|
}
|
Loading…
Add table
Add a link
Reference in a new issue