Mirror of https://github.com/edgelesssys/constellation.git, synced 2024-10-01 01:36:09 -04:00
ci: logcollection to OpenSearch in non-debug clusters (#2080)
* refactor `debugd` file structure
* create `hack`-tool to deploy logcollection to non-debug clusters
* integrate changes into CI
* update fields
* update workflow input names
* use `working-directory`
* add opensearch creds to upgrade workflow
* make template func generic
* make templating func generic
* linebreaks
* remove magic defaults
* move `os.Exit` to main package
* make logging index configurable
* make templating generic
* remove excess brace
* update fields
* copy fields
* fix flag name
* fix linter warnings
* remove unused workflow inputs
* remove makefiles
* fix command
* bazel: fix output paths of container

  This fixes the output paths of builds within the container by mounting directories to paths that exist on the host. We also explicitly set the output path in a .bazelrc to the user-specific path. The rc file is mounted into the container and overrides the host rc. Also adds an automatic stop in case start is called and a container is already running. Symlinks like bazel-out and the paths Bazel outputs should generally work with this change.

* tabs -> spaces

---------

Signed-off-by: Paul Meyer <49727155+katexochen@users.noreply.github.com>
Co-authored-by: Paul Meyer <49727155+katexochen@users.noreply.github.com>
This commit is contained in: parent 7d040c26ba, commit 60bf770e62
@@ -35,6 +35,9 @@ inputs:
  azureSNPEnforcementPolicy:
    required: false
    description: "Azure SNP enforcement policy."
  test:
    description: "The e2e test payload."
    required: true

outputs:
  kubeconfig:
@@ -131,6 +134,9 @@ runs:
          --info logcollect.github.ref-name="${{ github.ref_name }}" \
          --info logcollect.github.sha="${{ github.sha }}" \
          --info logcollect.github.runner-os="${{ runner.os }}" \
          --info logcollect.github.e2e-test-payload="${{ inputs.test }}" \
          --info logcollect.github.is-debug-cluster=false \
          --info logcollect.deployment-type="debugd" \
          --verbosity=-1 \
          --force
        echo "::endgroup::"
.github/actions/deploy_logcollection/action.yml (vendored, new file, 73 lines)
@@ -0,0 +1,73 @@
name: Log Collection Deployment
description: Deploy log collection functionality to the cluster.

inputs:
  logstash-port:
    description: "The port of the logstash service."
    default: "5045"
  kubeconfig:
    description: "The kubeconfig of the cluster to deploy to."
    required: true
  opensearchUser:
    description: "The username of the opensearch cluster."
    required: true
  opensearchPwd:
    description: "The password of the opensearch cluster."
    required: true
  test:
    description: "The e2e test payload."
    required: true
  provider:
    description: "The CSP of the cluster."
    required: true
  isDebugImage:
    description: "Whether the cluster is a debug cluster / uses a debug image."
    required: true

runs:
  using: "composite"
  steps:
    - name: Template Logcollection Helm Values
      id: template
      shell: bash
      run: |
        bazel run //hack/logcollector template -- \
          --dir $(realpath .) \
          --username ${{ inputs.opensearchUser }} \
          --password ${{ inputs.opensearchPwd }} \
          --port ${{ inputs.logstash-port }} \
          --fields github.actor="${{ github.triggering_actor }}" \
          --fields github.workflow="${{ github.workflow }}" \
          --fields github.run-id="${{ github.run_id }}" \
          --fields github.run-attempt="${{ github.run_attempt }}" \
          --fields github.ref-name="${{ github.ref_name }}" \
          --fields github.sha="${{ github.sha }}" \
          --fields github.runner-os="${{ runner.os }}" \
          --fields github.e2e-test-payload="${{ inputs.test }}" \
          --fields github.isDebugImage="${{ inputs.isDebugImage }}" \
          --fields github.e2e-test-provider="${{ inputs.provider }}" \
          --fields deployment-type="k8s"

    - name: Deploy Logstash
      id: deploy-logstash
      shell: bash
      working-directory: ./logstash
      env:
        KUBECONFIG: ${{ inputs.kubeconfig }}
      run: |
        helm repo add elastic https://helm.elastic.co
        helm repo update
        helm install logstash elastic/logstash \
          --wait --timeout=1200s --values values.yml

    - name: Deploy Filebeat
      id: deploy-filebeat
      shell: bash
      working-directory: ./filebeat
      env:
        KUBECONFIG: ${{ inputs.kubeconfig }}
      run: |
        helm repo add elastic https://helm.elastic.co
        helm repo update
        helm install filebeat elastic/filebeat \
          --wait --timeout=1200s --values values.yml
.github/actions/e2e_test/action.yml (vendored, 13 lines changed)
@@ -248,6 +248,19 @@ runs:
        cliVersion: ${{ inputs.cliVersion }}
        azureSNPEnforcementPolicy: ${{ inputs.azureSNPEnforcementPolicy }}

    - name: Deploy logcollection
      id: deploy-logcollection
      # TODO(msanft): temporarily deploy in debug clusters too to resolve the "missing logs" bug
      # see https://dev.azure.com/Edgeless/Edgeless/_workitems/edit/3227
      # if: inputs.isDebugImage == 'false'
      uses: ./.github/actions/deploy_logcollection
      with:
        kubeconfig: ${{ steps.constellation-create.outputs.kubeconfig }}
        opensearchUser: ${{ inputs.awsOpenSearchUsers }}
        opensearchPwd: ${{ inputs.awsOpenSearchPwd }}
        test: ${{ inputs.test }}
        provider: ${{ inputs.cloudProvider }}
        isDebugImage: ${{ inputs.isDebugImage }}

    #
    # Test payloads
    #
.github/workflows/build-logcollector-images.yml (vendored, 16 lines changed)
@@ -6,13 +6,13 @@ on:
    branches:
      - main
    paths:
      - "debugd/internal/debugd/logcollector/Makefile"
      - "debugd/internal/debugd/logcollector/filebeat/**"
      - "debugd/internal/debugd/logcollector/logstash/**"
      - "debugd/filebeat/**"
      - "debugd/logstash/**"
      - "hack/logcollector/internal/templates/**"
      - ".github/workflows/build-logcollector-images.yml"

jobs:
  build-logcollector-images:
  build-logcollector-debugd-images:
    runs-on: ubuntu-22.04
    permissions:
      contents: read
@@ -24,16 +24,16 @@ jobs:
        with:
          ref: ${{ !github.event.pull_request.head.repo.fork && github.head_ref || '' }}

      - name: Build and upload logstash container image
      - name: Build and upload Logstash container image
        uses: ./.github/actions/build_micro_service
        with:
          name: logstash-debugd
          dockerfile: debugd/internal/debugd/logcollector/logstash/Dockerfile
          dockerfile: debugd/logstash/Dockerfile
          githubToken: ${{ secrets.GITHUB_TOKEN }}

      - name: Build and upload filebeat container image
      - name: Build and upload Filebeat container image
        uses: ./.github/actions/build_micro_service
        with:
          name: filebeat-debugd
          dockerfile: debugd/internal/debugd/logcollector/filebeat/Dockerfile
          dockerfile: debugd/filebeat/Dockerfile
          githubToken: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/e2e-test-daily.yml (vendored, 5 lines changed)
@@ -87,6 +87,9 @@ jobs:
          cosignPassword: ${{ secrets.COSIGN_PASSWORD }}
          cosignPrivateKey: ${{ secrets.COSIGN_PRIVATE_KEY }}
          fetchMeasurements: ${{ matrix.refStream != 'ref/release/stream/stable/?' }}
          awsOpenSearchDomain: ${{ secrets.AWS_OPENSEARCH_DOMAIN }}
          awsOpenSearchUsers: ${{ secrets.AWS_OPENSEARCH_USER }}
          awsOpenSearchPwd: ${{ secrets.AWS_OPENSEARCH_PWD }}

      - name: Always terminate cluster
        if: always()
@@ -122,7 +125,7 @@ jobs:
          test: ${{ matrix.test }}
          kubernetesVersion: ${{ matrix.kubernetesVersion }}
          provider: ${{ matrix.provider }}

  e2e-mini:
    name: Run miniconstellation E2E test
    runs-on: ubuntu-22.04
.github/workflows/e2e-upgrade.yml (vendored, 3 lines changed)
@@ -175,6 +175,9 @@ jobs:
          azureIAMCreateCredentials: ${{ secrets.AZURE_E2E_IAM_CREDENTIALS }}
          registry: ghcr.io
          githubToken: ${{ secrets.GITHUB_TOKEN }}
          awsOpenSearchDomain: ${{ secrets.AWS_OPENSEARCH_DOMAIN }}
          awsOpenSearchUsers: ${{ secrets.AWS_OPENSEARCH_USER }}
          awsOpenSearchPwd: ${{ secrets.AWS_OPENSEARCH_PWD }}

      - name: Build CLI
        uses: ./.github/actions/build_cli
@@ -1,6 +1,8 @@
#!/usr/bin/env bash

function setup {
  (stopBazelServer && sleep 1) || true

  # Ensure that the cache directories exist, so they are not created by docker with root permissions.
  mkdir -p "${HOME}/.cache/bazel"
  mkdir -p "${HOME}/.cache/shared_bazel_repository_cache"
@@ -20,6 +22,12 @@ function startBazelServer {

  setup

  # In-container .bazelrc overwrite.
  mkdir -p "/tmp/bazel-container"
  cat << EOF > "/tmp/bazel-container/.bazelrc"
startup --output_user_root=/home/${USER}/.cache/bazel/_bazel_${USER}
EOF

  local hostWorkspaceDir
  hostWorkspaceDir="$(git rev-parse --show-toplevel)"
  if [[ $? -ne 0 ]]; then
@@ -36,10 +44,11 @@ function startBazelServer {
    --detach \
    --name "${containerName}" \
    -v "${hostWorkspaceDir}":/workspace \
    -v "${HOME}/.cache/bazel":"/home/builder/.cache/bazel" \
    -v "${HOME}/.cache/shared_bazel_repository_cache":"/home/builder/.cache/shared_bazel_repository_cache" \
    -v "${HOME}/.cache/shared_bazel_action_cache":"/home/builder/.cache/shared_bazel_action_cache" \
    -v "${HOME}/.cache/bazel":"${HOME}/.cache/bazel" \
    -v "${HOME}/.cache/shared_bazel_repository_cache":"${HOME}/.cache/shared_bazel_repository_cache" \
    -v "${HOME}/.cache/shared_bazel_action_cache":"${HOME}/.cache/shared_bazel_action_cache" \
    -v "${HOME}/.docker/config.json":"/home/builder/.docker/config.json" \
    -v "/tmp/bazel-container/.bazelrc":"/etc/bazel.bazelrc" \
    --entrypoint=/bin/sleep \
    "${containerImage}" \
    infinity || return $?
debugd/filebeat/BUILD.bazel (new file, 12 lines)
@@ -0,0 +1,12 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "filebeat",
    srcs = ["assets.go"],
    embedsrcs = [
        "templates/filebeat.yml",
        "inputs.yml",
    ],
    importpath = "github.com/edgelesssys/constellation/v2/debugd/filebeat",
    visibility = ["//visibility:public"],
)
@@ -2,8 +2,7 @@ FROM fedora:38@sha256:61f921e0c7b51e162e6f94b14ef4e6b0d38eac5987286fe4f52a2c1158

RUN dnf install -y https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-8.6.2-x86_64.rpm

COPY debugd/internal/debugd/logcollector/filebeat/filebeat.yml /usr/share/filebeat/filebeat.yml

COPY debugd/internal/debugd/logcollector/filebeat/inputs.yml /usr/share/filebeat/inputs.d/inputs.yml
COPY debugd/filebeat/inputs.yml /usr/share/filebeat/inputs.yml
COPY debugd/filebeat/templates/ /usr/share/filebeat/templates/

ENTRYPOINT ["/usr/share/filebeat/bin/filebeat", "-e", "--path.home", "/usr/share/filebeat", "--path.data", "/usr/share/filebeat/data"]
debugd/filebeat/assets.go (new file, 15 lines)
@@ -0,0 +1,15 @@
/*
Copyright (c) Edgeless Systems GmbH

SPDX-License-Identifier: AGPL-3.0-only
*/

package filebeat

import "embed"

// Assets are the exported Filebeat template files.
//
//go:embed *.yml
//go:embed templates/*
var Assets embed.FS
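These embedded assets are consumed through the standard `fs.FS` interface. A minimal, standalone sketch of that read pattern, using `fstest.MapFS` as a stand-in for the real `embed.FS` (the file content here is illustrative, not the repository's actual template):

```go
package main

import (
	"fmt"
	"io/fs"
	"testing/fstest"
)

func main() {
	// Stand-in for debugd/filebeat's Assets; embed.FS also satisfies fs.FS,
	// so real consumers read it the same way.
	var assets fs.FS = fstest.MapFS{
		"templates/filebeat.yml": {Data: []byte("output.logstash:\n  hosts: [\"{{ .LogstashHost }}\"]\n")},
	}

	raw, err := fs.ReadFile(assets, "templates/filebeat.yml")
	if err != nil {
		panic(err)
	}
	fmt.Print(string(raw))
}
```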
@@ -1,5 +1,5 @@
output.logstash:
  hosts: ["localhost:5044"]
  hosts: ["{{ .LogstashHost }}"]

output.console:
  enabled: false
@@ -12,7 +12,7 @@ logging:
filebeat.config:
  inputs:
    enabled: true
    path: /usr/share/filebeat/inputs.d/*.yml
    path: /usr/share/filebeat/inputs.yml
    # reload.enabled: true
    # reload.period: 10s
@@ -13,7 +13,6 @@ import (
	"net"
	"path/filepath"
	"strconv"
	"strings"
	"sync"
	"time"

@@ -125,7 +124,7 @@ func deploy(cmd *cobra.Command, fileHandler file.Handler, constellationConfig *c
	if err != nil {
		return err
	}
	if err := checkInfoMap(info); err != nil {
	if err := logcollector.FieldsFromMap(info).Check(); err != nil {
		return err
	}

@@ -281,22 +280,6 @@ func uploadFiles(ctx context.Context, client pb.DebugdClient, in deployOnEndpoin
	return nil
}

func checkInfoMap(info map[string]string) error {
	logPrefix, logFields := logcollector.InfoFields()
	for k := range info {
		if !strings.HasPrefix(k, logPrefix) {
			continue
		}
		subkey := strings.TrimPrefix(k, logPrefix)

		if _, ok := logFields[subkey]; !ok {
			return fmt.Errorf("invalid subkey %q for info key %q", subkey, fmt.Sprintf("%s.%s", logPrefix, k))
		}
	}

	return nil
}

type fileTransferer interface {
	SendFiles(stream filetransfer.SendFilesStream) error
	SetFiles(files []filetransfer.FileStat)
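The call site above swaps cdbg's local `checkInfoMap` for the shared `logcollector.Fields` type. A self-contained sketch of the validation it performs, using trimmed-down copies of the shared types and hypothetical info values so it runs standalone:

```go
package main

import (
	"fmt"
	"strings"
)

// Trimmed-down copies of the shared logcollector declarations; the real
// code lives in debugd/internal/debugd/logcollector.
const debugdLogcollectPrefix = "logcollect."

var allowedFields = map[string]struct{}{"admin": {}, "github.sha": {}}

type fields map[string]string

// check validates only the "logcollect."-prefixed keys against the allowlist.
func (f fields) check() error {
	for k := range f {
		if !strings.HasPrefix(k, debugdLogcollectPrefix) {
			continue
		}
		subkey := strings.TrimPrefix(k, debugdLogcollectPrefix)
		if _, ok := allowedFields[subkey]; !ok {
			return fmt.Errorf("invalid subkey %q", subkey)
		}
	}
	return nil
}

func main() {
	info := map[string]string{"logcollect.admin": "alice", "logcollect.bogus": "x"}
	fmt.Println(fields(info).check()) // invalid subkey "bogus"
}
```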
@@ -3,22 +3,69 @@ Copyright (c) Edgeless Systems GmbH

SPDX-License-Identifier: AGPL-3.0-only
*/

package logcollector

// InfoFields are the fields that are allowed in the info map
// under the prefix "logcollect.".
func InfoFields() (string, map[string]struct{}) {
	return "logcollect.", map[string]struct{}{
		"admin": {}, // name of the person running the cdbg command

// THIS FILE IS A DUPLICATE OF hack/logcollector/fields/fields.go

import (
	"fmt"
	"strings"
)

var (
	// DebugdLogcollectPrefix is the prefix for all OpenSearch fields specified by the user when starting through debugd.
	DebugdLogcollectPrefix = "logcollect."
	// AllowedFields are the fields that are allowed to be used in the logcollection.
	AllowedFields = map[string]struct{}{
		"admin":            {}, // name of the person running the cdbg command
		"is_debug_cluster": {}, // whether the cluster is a debug cluster
		// GitHub workflow information, see https://docs.github.com/en/actions/learn-github-actions/environment-variables#default-environment-variables
		"github.actor":       {},
		"github.workflow":    {},
		"github.run-id":      {},
		"github.run-attempt": {},
		"github.ref-name":    {},
		"github.sha":         {},
		"github.runner-os":   {},
		"github.actor":       {},
		"github.workflow":    {},
		"github.run-id":      {},
		"github.run-attempt": {},
		"github.ref-name":    {},
		"github.sha":         {},
		"github.runner-os":   {},
		"github.e2e-test-payload": {},
		"github.is-debug-cluster": {},
		// cloud provider used in the e2e test. If deployed with debugd, this is a duplicate as it's also
		// available in the metadata. If deployed through K8s in e2e tests with a stable image, this
		// is where the cloud provider is stored.
		"github.e2e-test-provider": {},
		"deployment-type":          {}, // deployment type, e.g. "debugd", "k8s"
	}
)

// FieldsFromMap returns new Fields from the given map.
func FieldsFromMap(m map[string]string) Fields {
	return Fields(m)
}

// Fields are the OpenSearch fields that are associated with a log message.
type Fields map[string]string

// Extend adds the fields from other to f and returns the result.
func (f Fields) Extend(other Fields) Fields {
	for k, v := range other {
		f[k] = v
	}
	return f
}

// Check checks whether all the fields in f are allowed. For fields that are prefixed
// with the debugd logcollect prefix, only the subkeys are checked.
func (f Fields) Check() error {
	for k := range f {
		if !strings.HasPrefix(k, DebugdLogcollectPrefix) {
			continue
		}
		subkey := strings.TrimPrefix(k, DebugdLogcollectPrefix)

		if _, ok := AllowedFields[subkey]; !ok {
			return fmt.Errorf("invalid subkey %q for info key %q", subkey, fmt.Sprintf("%s%s", DebugdLogcollectPrefix, k))
		}
	}

	return nil
}
@@ -14,6 +14,7 @@ import (
	"io"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
	"sync"
	"text/template"
@@ -74,7 +75,7 @@ func NewStartTrigger(ctx context.Context, wg *sync.WaitGroup, provider cloudprov
	}

	logger.Infof("Getting logstash pipeline template")
	tmpl, err := getTemplate(ctx, logger)
	tmpl, err := getTemplate(ctx, logger, versions.LogstashImage, "/run/logstash/templates/pipeline.conf", "/run/logstash")
	if err != nil {
		logger.Errorf("Getting logstash pipeline template: %v", err)
		return
@@ -90,15 +91,31 @@

	logger.Infof("Writing logstash pipeline")
	pipelineConf := logstashConfInput{
		Port:        5044,
		Host:        openSearchHost,
		IndexPrefix: "systemd-logs",
		InfoMap:     infoMapM,
		Credentials: creds,
	}
	if err := writeLogstashPipelineConf(tmpl, pipelineConf); err != nil {
	if err := writeTemplate("/run/filebeat/filebeat.yml", tmpl, pipelineConf); err != nil {
		logger.Errorf("Writing logstash pipeline: %v", err)
		return
	}

	logger.Infof("Getting logstash config template")
	tmpl, err = getTemplate(ctx, logger, versions.FilebeatImage, "/run/filebeat/templates/filebeat.yml", "/run/filebeat")
	if err != nil {
		logger.Errorf("Getting filebeat config template: %v", err)
		return
	}
	filebeatConf := filebeatConfInput{
		LogstashHost: "localhost:5044",
	}
	if err := writeTemplate("/run/logstash/pipeline/pipeline.conf", tmpl, filebeatConf); err != nil {
		logger.Errorf("Writing filebeat config: %v", err)
		return
	}

	logger.Infof("Starting log collection pod")
	if err := startPod(ctx, logger); err != nil {
		logger.Errorf("Starting filebeat: %v", err)
@@ -107,31 +124,31 @@
	}
}

func getTemplate(ctx context.Context, logger *logger.Logger) (*template.Template, error) {
func getTemplate(ctx context.Context, logger *logger.Logger, image, templateDir, destDir string) (*template.Template, error) {
	createContainerArgs := []string{
		"create",
		"--name=template",
		versions.LogstashImage,
		image,
	}
	createContainerCmd := exec.CommandContext(ctx, "podman", createContainerArgs...)
	logger.Infof("Creating logstash template container")
	logger.Infof("Creating template container")
	if out, err := createContainerCmd.CombinedOutput(); err != nil {
		return nil, fmt.Errorf("creating logstash template container: %w\n%s", err, out)
		return nil, fmt.Errorf("creating template container: %w\n%s", err, out)
	}

	if err := os.MkdirAll("/run/logstash", 0o777); err != nil {
		return nil, fmt.Errorf("creating logstash template dir: %w", err)
	if err := os.MkdirAll(destDir, 0o777); err != nil {
		return nil, fmt.Errorf("creating template dir: %w", err)
	}

	copyFromArgs := []string{
		"cp",
		"template:/usr/share/constellogs/templates/",
		"/run/logstash/",
		destDir,
	}
	copyFromCmd := exec.CommandContext(ctx, "podman", copyFromArgs...)
	logger.Infof("Copying logstash templates")
	logger.Infof("Copying templates")
	if out, err := copyFromCmd.CombinedOutput(); err != nil {
		return nil, fmt.Errorf("copying logstash templates: %w\n%s", err, out)
		return nil, fmt.Errorf("copying templates: %w\n%s", err, out)
	}

	removeContainerArgs := []string{
@@ -139,14 +156,14 @@ func getTemplate(ctx context.Context, logger *logger.Logger) (*template.Template
		"template",
	}
	removeContainerCmd := exec.CommandContext(ctx, "podman", removeContainerArgs...)
	logger.Infof("Removing logstash template container")
	logger.Infof("Removing template container")
	if out, err := removeContainerCmd.CombinedOutput(); err != nil {
		return nil, fmt.Errorf("removing logstash template container: %w\n%s", err, out)
		return nil, fmt.Errorf("removing template container: %w\n%s", err, out)
	}

	tmpl, err := template.ParseFiles("/run/logstash/templates/pipeline.conf")
	tmpl, err := template.ParseFiles(templateDir)
	if err != nil {
		return nil, fmt.Errorf("parsing logstash template: %w", err)
		return nil, fmt.Errorf("parsing template: %w", err)
	}

	return tmpl, nil
@@ -198,6 +215,7 @@ func startPod(ctx context.Context, logger *logger.Logger) error {
		"--volume=/run/systemd:/run/systemd:ro",
		"--volume=/run/systemd/journal/socket:/run/systemd/journal/socket:rw",
		"--volume=/run/state/var/log:/var/log:ro",
		"--volume=/run/filebeat:/usr/share/filebeat/:ro",
		versions.FilebeatImage,
	}
	runFilebeatCmd := exec.CommandContext(ctx, "podman", runFilebeatArgs...)
@@ -212,24 +230,30 @@
}

type logstashConfInput struct {
	Port        int
	Host        string
	IndexPrefix string
	InfoMap     map[string]string
	Credentials credentials
}

func writeLogstashPipelineConf(templ *template.Template, in logstashConfInput) error {
	if err := os.MkdirAll("/run/logstash/pipeline", 0o777); err != nil {
		return fmt.Errorf("creating logstash config dir: %w", err)
type filebeatConfInput struct {
	LogstashHost string
}

func writeTemplate(path string, templ *template.Template, in any) error {
	if err := os.MkdirAll(filepath.Dir(path), 0o777); err != nil {
		return fmt.Errorf("creating template dir: %w", err)
	}

	file, err := os.OpenFile("/run/logstash/pipeline/pipeline.conf", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o777)
	file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o777)
	if err != nil {
		return fmt.Errorf("opening logstash config file: %w", err)
		return fmt.Errorf("opening template file: %w", err)
	}
	defer file.Close()

	if err := templ.Execute(file, in); err != nil {
		return fmt.Errorf("executing logstash pipeline template: %w", err)
		return fmt.Errorf("executing template: %w", err)
	}

	return nil
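The new `writeTemplate` helper renders arbitrary template data to any destination path. A runnable sketch of the same pattern, writing under a temp directory instead of /run to stay side-effect free:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"text/template"
)

// writeTemplate mirrors debugd's helper: ensure the parent dir exists,
// truncate the target file, and render the template into it.
func writeTemplate(path string, templ *template.Template, in any) error {
	if err := os.MkdirAll(filepath.Dir(path), 0o777); err != nil {
		return fmt.Errorf("creating template dir: %w", err)
	}
	file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o777)
	if err != nil {
		return fmt.Errorf("opening template file: %w", err)
	}
	defer file.Close()
	if err := templ.Execute(file, in); err != nil {
		return fmt.Errorf("executing template: %w", err)
	}
	return nil
}

func main() {
	tmpl := template.Must(template.New("filebeat.yml").Parse("output.logstash:\n  hosts: [\"{{ .LogstashHost }}\"]\n"))
	path := filepath.Join(os.TempDir(), "filebeat-demo", "filebeat.yml")
	if err := writeTemplate(path, tmpl, struct{ LogstashHost string }{"localhost:5044"}); err != nil {
		panic(err)
	}
	out, _ := os.ReadFile(path)
	fmt.Print(string(out))
}
```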
@@ -57,7 +57,7 @@ filter {
output {
  opensearch {
    hosts => "{{ .Host }}"
    index => "systemd-logs-%{+YYYY.MM.dd}"
    index => "{{ .IndexPrefix }}-%{+YYYY.MM.dd}"
    user => "{{ .Credentials.Username }}"
    password => "{{ .Credentials.Password }}"
    ssl => true
debugd/logstash/BUILD.bazel (new file, 14 lines)
@@ -0,0 +1,14 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "logstash",
    srcs = ["assets.go"],
    embedsrcs = [
        "config/log4j2.properties",
        "config/logstash.yml",
        "config/pipelines.yml",
        "templates/pipeline.conf",
    ],
    importpath = "github.com/edgelesssys/constellation/v2/debugd/logstash",
    visibility = ["//visibility:public"],
)
@@ -9,8 +9,8 @@ FROM fedora:38@sha256:61f921e0c7b51e162e6f94b14ef4e6b0d38eac5987286fe4f52a2c1158

COPY --from=build logstash-* /usr/share/logstash

COPY debugd/internal/debugd/logcollector/logstash/config/ /usr/share/logstash/config/
COPY debugd/internal/debugd/logcollector/logstash/templates/ /usr/share/constellogs/templates/
COPY debugd/logstash/config/ /usr/share/logstash/config/
COPY debugd/logstash/templates/ /usr/share/constellogs/templates/

RUN chmod -R 777 /usr/share/logstash/data/
debugd/logstash/assets.go (new file, 15 lines)
@@ -0,0 +1,15 @@
/*
Copyright (c) Edgeless Systems GmbH

SPDX-License-Identifier: AGPL-3.0-only
*/

package logstash

import "embed"

// Assets are the exported Logstash template files.
//
//go:embed config/*
//go:embed templates/*
var Assets embed.FS
debugd/logstash/templates/pipeline.conf (new file, 66 lines)
@@ -0,0 +1,66 @@
input {
  beats {
    host => "0.0.0.0"
    port => {{ .Port }}
  }
}

filter {
  mutate {
    # Remove some fields that are not needed.
    remove_field => [
      "[agent]",
      "[journald]",
      "[syslog]",
      "[systemd][invocation_id]",
      "[event][original]",
      "[log][offset]",
      "[log][syslog]"
    ]

    # Tag with the provided metadata.
    add_field => {
      {{ range $key, $value := .InfoMap }}
      "[metadata][{{ $key }}]" => "{{ $value }}"
      {{ end }}
    }
  }

  # Parse structured logs for the following systemd units.
  if [systemd][unit] in ["bootstrapper.service", "constellation-bootstrapper.service"] {
    # skip_on_invalid_json below does not skip the whole filter, so let's use a cheap workaround here.
    # See:
    # https://discuss.elastic.co/t/skip-on-invalid-json-skipping-all-filters/215195
    # https://discuss.elastic.co/t/looking-for-a-way-to-detect-json/102263
    if [message] =~ "\A\{.+\}\z" {
      json {
        source => "message"
        target => "logs"
        skip_on_invalid_json => true
      }
      mutate {
        replace => {
          "message" => "%{[logs][msg]}"
        }
        remove_field => [
          "[logs][msg]",
          "[logs][ts]"
        ]
      }
      de_dot {
        fields => ["[logs][peer.address]"]
      }
    }
  }
}

output {
  opensearch {
    hosts => "{{ .Host }}"
    index => "systemd-logs-%{+YYYY.MM.dd}"
    user => "{{ .Credentials.Username }}"
    password => "{{ .Credentials.Password }}"
    ssl => true
    ssl_certificate_verification => true
  }
}
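The `[message] =~ "\A\{.+\}\z"` guard cheaply checks that a message looks like one complete JSON object before the json filter runs. For illustration only, a small Go analogue of the same guard-then-parse flow (the messages are hypothetical):

```go
package main

import (
	"encoding/json"
	"fmt"
	"regexp"
)

// Mirrors the pipeline's cheap "looks like a JSON object" check.
var jsonObj = regexp.MustCompile(`\A\{.+\}\z`)

func main() {
	for _, msg := range []string{`{"msg":"boot ok","ts":1}`, "plain text line"} {
		if !jsonObj.MatchString(msg) {
			fmt.Println("passthrough:", msg)
			continue
		}
		var m map[string]any
		if err := json.Unmarshal([]byte(msg), &m); err == nil {
			fmt.Println("structured:", m["msg"])
		}
	}
}
```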
dev-docs/workflows/logcollection.md (new file, 59 lines)
@@ -0,0 +1,59 @@
# Logcollection

One can deploy [Filebeat](https://www.elastic.co/guide/en/beats/filebeat/current/index.html) and [Logstash](https://www.elastic.co/guide/en/logstash/current/index.html) to enable collection of logs to [OpenSearch](https://search-e2e-logs-y46renozy42lcojbvrt3qq7csm.eu-central-1.es.amazonaws.com/_dashboards/app/home#/), which allows for aggregation and easy inspection of said logs.
The logcollection functionality can be deployed to both [debug](./debug-cluster.md) and non-debug clusters.

## Deployment in Debug Clusters

In debug clusters, logcollection functionality is deployed automatically through the debug daemon `debugd`. Since `debugd` runs *before* the bootstrapper, it can, contrary to non-debug clusters, also collect logs of the bootstrapper.

## Deployment in Non-Debug Clusters

In non-debug clusters, logcollection functionality needs to be explicitly deployed as a Kubernetes Deployment through Helm. To do that, a few steps need to be followed:

1. Template the deployment configuration through the `loco` CLI.

   ```bash
   bazel run //hack/logcollector template -- \
     --dir $(realpath .) \
     --username <OPENSEARCH_USERNAME> \
     --password <OPENSEARCH_PW> \
     --fields deployment-type={k8s, debugd}
   ...
   ```

   This will place the templated configuration in the current directory. OpenSearch user credentials can be created by any admin in OpenSearch.
   Logging in with your company CSP accounts should grant you sufficient permissions to [create a user](https://opensearch.org/docs/latest/security/access-control/users-roles/#create-users)
   and [grant them the required `all_access` role](https://opensearch.org/docs/latest/security/access-control/users-roles/#map-users-to-roles).
   One can add additional key-value pairs to the configuration by appending `--fields key=value` to the command.
   These key-value pairs will be attached to the log entries and can be used to filter them in OpenSearch.
   For example, it might be helpful to add a `test=<xyz>` tag to be able to filter out logs from a specific test run.

2. Deploy Logstash

   ```bash
   cd logstash
   make add
   make install
   cd ..
   ```

   This will add the required Logstash Helm charts and deploy them to your cluster.

3. Deploy Filebeat

   ```bash
   cd filebeat
   make add
   make install
   cd ..
   ```

   This will add the required Filebeat Helm charts and deploy them to your cluster.

To remove Logstash or Filebeat, `cd` into the corresponding directory and run `make remove`.

## Inspecting Logs in OpenSearch

To search through logs in OpenSearch, head to the [discover page](https://search-e2e-logs-y46renozy42lcojbvrt3qq7csm.eu-central-1.es.amazonaws.com/_dashboards/app/discover) in the
OpenSearch dashboard and configure the timeframe selector in the top right accordingly.
Click `Refresh`. You can now see all logs recorded in the specified timeframe. To get a less cluttered view, select the fields you want to inspect in the left sidebar.
@@ -51,6 +51,7 @@ require (
	go.uber.org/zap v1.24.0
	golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df
	golang.org/x/mod v0.12.0
	gopkg.in/yaml.v3 v3.0.1
	libvirt.org/go/libvirt v1.9004.0
)

@@ -299,7 +300,6 @@ require (
	gopkg.in/square/go-jose.v2 v2.6.0 // indirect
	gopkg.in/warnings.v0 v0.1.2 // indirect
	gopkg.in/yaml.v2 v2.4.0 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
	helm.sh/helm v2.17.0+incompatible // indirect
	helm.sh/helm/v3 v3.12.1 // indirect
	k8s.io/api v0.27.3 // indirect
hack/logcollector/BUILD.bazel (new file, 15 lines)
@@ -0,0 +1,15 @@
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")

go_library(
    name = "logcollector_lib",
    srcs = ["main.go"],
    importpath = "github.com/edgelesssys/constellation/v2/hack/logcollector",
    visibility = ["//visibility:private"],
    deps = ["//hack/logcollector/cmd"],
)

go_binary(
    name = "logcollector",
    embed = [":logcollector_lib"],
    visibility = ["//visibility:public"],
)
hack/logcollector/cmd/BUILD.bazel (new file, 16 lines)
@@ -0,0 +1,16 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "cmd",
    srcs = [
        "root.go",
        "template.go",
    ],
    importpath = "github.com/edgelesssys/constellation/v2/hack/logcollector/cmd",
    visibility = ["//visibility:public"],
    deps = [
        "//hack/logcollector/fields",
        "//hack/logcollector/internal",
        "@com_github_spf13_cobra//:cobra",
    ],
)
hack/logcollector/cmd/root.go (new file, 28 lines)
@@ -0,0 +1,28 @@
/*
Copyright (c) Edgeless Systems GmbH

SPDX-License-Identifier: AGPL-3.0-only
*/
package cmd

import (
	"github.com/spf13/cobra"
)

func newRootCmd() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "loco",
		Short: "Constellation logcollection client",
		Long: `LoCo is the Constellation LOgCOllection client.
It prepares filebeat and logstash configurations for deployment.`,
	}

	cmd.AddCommand(newTemplateCmd())

	return cmd
}

// Execute starts the CLI.
func Execute() error {
	return newRootCmd().Execute()
}
hack/logcollector/cmd/template.go (new file, 123 lines)
@@ -0,0 +1,123 @@
/*
Copyright (c) Edgeless Systems GmbH

SPDX-License-Identifier: AGPL-3.0-only
*/
package cmd

import (
	"fmt"

	"github.com/edgelesssys/constellation/v2/hack/logcollector/fields"
	"github.com/edgelesssys/constellation/v2/hack/logcollector/internal"
	"github.com/spf13/cobra"
)

func newTemplateCmd() *cobra.Command {
	templateCmd := &cobra.Command{
		Use:   "template",
		Short: "Templates filebeat and logstash configurations and prepares them for deployment",
		Long:  `Templates filebeat and logstash configurations and prepares them for deployment by placing them in the specified directory.`,
		RunE:  runTemplate,
	}

	templateCmd.Flags().String("dir", "", "Directory to place the templated configurations in (required)")
	must(templateCmd.MarkFlagRequired("dir"))
	must(templateCmd.MarkFlagDirname("dir"))
	templateCmd.Flags().String("username", "", "OpenSearch username (required)")
	must(templateCmd.MarkFlagRequired("username"))
	templateCmd.Flags().String("password", "", "OpenSearch password (required)")
	must(templateCmd.MarkFlagRequired("password"))
	templateCmd.Flags().String("index-prefix", "systemd-logs", "Prefix for logging index (e.g. systemd-logs)")
	templateCmd.Flags().Int("port", 5045, "Logstash port")
	templateCmd.Flags().StringToString("fields", nil, "Additional fields for the Logstash pipeline")

	return templateCmd
}

func runTemplate(cmd *cobra.Command, _ []string) error {
	flags, err := parseTemplateFlags(cmd)
	if err != nil {
		return fmt.Errorf("parse template flags: %w", err)
	}

	if err := flags.extraFields.Check(); err != nil {
		return fmt.Errorf("validating extra fields: %w", err)
	}

	logstashPreparer := internal.NewLogstashPreparer(
		flags.extraFields,
		flags.username,
		flags.password,
		flags.indexPrefix,
		flags.port,
	)
	if err := logstashPreparer.Prepare(flags.dir); err != nil {
		return fmt.Errorf("prepare logstash: %w", err)
	}

	filebeatPreparer := internal.NewFilebeatPreparer(
		flags.port,
	)
	if err := filebeatPreparer.Prepare(flags.dir); err != nil {
		return fmt.Errorf("prepare filebeat: %w", err)
	}

	return nil
}

func parseTemplateFlags(cmd *cobra.Command) (templateFlags, error) {
	dir, err := cmd.Flags().GetString("dir")
	if err != nil {
		return templateFlags{}, fmt.Errorf("parse dir string: %w", err)
	}

	username, err := cmd.Flags().GetString("username")
	if err != nil {
		return templateFlags{}, fmt.Errorf("parse username string: %w", err)
	}

	password, err := cmd.Flags().GetString("password")
	if err != nil {
		return templateFlags{}, fmt.Errorf("parse password string: %w", err)
	}

	indexPrefix, err := cmd.Flags().GetString("index-prefix")
	if err != nil {
		return templateFlags{}, fmt.Errorf("parse index-prefix string: %w", err)
	}

	extraFields, err := cmd.Flags().GetStringToString("fields")
	if err != nil {
		return templateFlags{}, fmt.Errorf("parse fields map: %w", err)
	}

	port, err := cmd.Flags().GetInt("port")
	if err != nil {
		return templateFlags{}, fmt.Errorf("parse port int: %w", err)
	}

	return templateFlags{
		dir:         dir,
		username:    username,
		password:    password,
		indexPrefix: indexPrefix,
		extraFields: extraFields,
		port:        port,
	}, nil
}

type templateFlags struct {
	dir         string
	username    string
	password    string
	indexPrefix string
	extraFields fields.Fields
	port        int
}

func must(err error) {
	if err != nil {
		panic(err)
	}
}
|
8
hack/logcollector/fields/BUILD.bazel
Normal file
8
hack/logcollector/fields/BUILD.bazel
Normal file
@ -0,0 +1,8 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "fields",
|
||||
srcs = ["fields.go"],
|
||||
importpath = "github.com/edgelesssys/constellation/v2/hack/logcollector/fields",
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
hack/logcollector/fields/fields.go (new file, 71 lines)
@@ -0,0 +1,71 @@
/*
Copyright (c) Edgeless Systems GmbH

SPDX-License-Identifier: AGPL-3.0-only
*/
package fields

import (
	"fmt"
	"strings"
)

// THIS FILE IS A DUPLICATE OF debugd/internal/debugd/logcollector/fields.go

var (
	// DebugdLogcollectPrefix is the prefix for all OpenSearch fields specified by the user when starting through debugd.
	DebugdLogcollectPrefix = "logcollect."
	// AllowedFields are the fields that are allowed to be used in the logcollection.
	AllowedFields = map[string]struct{}{
		"admin":            {}, // name of the person running the cdbg command
		"is_debug_cluster": {}, // whether the cluster is a debug cluster
		// GitHub workflow information, see https://docs.github.com/en/actions/learn-github-actions/environment-variables#default-environment-variables
		"github.actor":            {},
		"github.workflow":         {},
		"github.run-id":           {},
		"github.run-attempt":      {},
		"github.ref-name":         {},
		"github.sha":              {},
		"github.runner-os":        {},
		"github.e2e-test-payload": {},
		"github.is-debug-cluster": {},
		// cloud provider used in the e2e test. If deployed with debugd, this is a duplicate as it's also
		// available in the metadata. If deployed through K8s in e2e tests with a stable image, this
		// is where the cloud provider is stored.
		"github.e2e-test-provider": {},
		"deployment-type":          {}, // deployment type, e.g. "debugd", "k8s"
	}
)

// FromMap returns new Fields from the given map.
func FromMap(m map[string]string) Fields {
	return Fields(m)
}

// Fields are the OpenSearch fields that are associated with a log message.
type Fields map[string]string

// Extend adds the fields from other to f and returns the result.
func (f Fields) Extend(other Fields) Fields {
	for k, v := range other {
		f[k] = v
	}
	return f
}

// Check checks whether all the fields in f are allowed. For fields that are prefixed
// with the debugd logcollect prefix, only the subkeys are checked.
func (f Fields) Check() error {
	for k := range f {
		if !strings.HasPrefix(k, DebugdLogcollectPrefix) {
			continue
		}
		subkey := strings.TrimPrefix(k, DebugdLogcollectPrefix)

		if _, ok := AllowedFields[subkey]; !ok {
			return fmt.Errorf("invalid subkey %q for info key %q", subkey, fmt.Sprintf("%s%s", DebugdLogcollectPrefix, k))
		}
	}

	return nil
}
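A short usage sketch of this package's API, with hypothetical field values, showing how extra fields are merged and validated before templating (runnable inside this repository):

```go
package main

import (
	"fmt"

	"github.com/edgelesssys/constellation/v2/hack/logcollector/fields"
)

func main() {
	f := fields.FromMap(map[string]string{"deployment-type": "k8s"})
	f = f.Extend(fields.Fields{"logcollect.github.sha": "abc123"})

	// Check validates only the "logcollect."-prefixed keys against AllowedFields;
	// unprefixed keys pass through untouched.
	if err := f.Check(); err != nil {
		panic(err)
	}
	fmt.Println(f)
}
```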
hack/logcollector/internal/BUILD.bazel (new file, 23 lines)
@@ -0,0 +1,23 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "internal",
    srcs = [
        "filebeat.go",
        "logstash.go",
        "prepare.go",
    ],
    embedsrcs = [
        "templates/filebeat/values.yml",
        "templates/logstash/values.yml",
    ],
    importpath = "github.com/edgelesssys/constellation/v2/hack/logcollector/internal",
    visibility = ["//hack/logcollector:__subpackages__"],
    deps = [
        "//debugd/filebeat",
        "//debugd/logstash",
        "//internal/file",
        "@com_github_spf13_afero//:afero",
        "@in_gopkg_yaml_v3//:yaml_v3",
    ],
)
hack/logcollector/internal/filebeat.go (new file, 119 lines)
@@ -0,0 +1,119 @@
/*
Copyright (c) Edgeless Systems GmbH

SPDX-License-Identifier: AGPL-3.0-only
*/
package internal

import (
	"embed"
	"fmt"
	"path/filepath"

	"github.com/edgelesssys/constellation/v2/debugd/filebeat"
	"github.com/edgelesssys/constellation/v2/internal/file"
	"github.com/spf13/afero"
	"gopkg.in/yaml.v3"
)

var (
	//go:embed templates/filebeat/*
	filebeatHelmAssets embed.FS

	filebeatAssets = filebeat.Assets
)

// FilebeatPreparer prepares the Filebeat Helm chart.
type FilebeatPreparer struct {
	fh   file.Handler
	port int
	templatePreparer
}

// NewFilebeatPreparer returns a new FilebeatPreparer.
func NewFilebeatPreparer(port int) *FilebeatPreparer {
	return &FilebeatPreparer{
		fh:   file.NewHandler(afero.NewOsFs()),
		port: port,
	}
}

// Prepare prepares the Filebeat Helm chart by templating the filebeat.yml and inputs.yml files and placing them in the specified directory.
func (p *FilebeatPreparer) Prepare(dir string) error {
	templatedFilebeatYaml, err := p.template(filebeatAssets, "templates/filebeat.yml", FilebeatTemplateData{
		LogstashHost: fmt.Sprintf("logstash-logstash:%d", p.port),
	})
	if err != nil {
		return fmt.Errorf("template filebeat.yml: %w", err)
	}

	inputsYaml, err := filebeatAssets.ReadFile("inputs.yml")
	if err != nil {
		return fmt.Errorf("read inputs.yml: %w", err)
	}

	rawHelmValues, err := filebeatHelmAssets.ReadFile("templates/filebeat/values.yml")
	if err != nil {
		return fmt.Errorf("read values.yml: %w", err)
	}

	helmValuesYaml := &FilebeatHelmValues{}
	if err := yaml.Unmarshal(rawHelmValues, helmValuesYaml); err != nil {
		return fmt.Errorf("unmarshal values.yml: %w", err)
	}

	helmValuesYaml.Daemonset.FilebeatConfig.FilebeatYml = templatedFilebeatYaml.String()
	helmValuesYaml.Daemonset.FilebeatConfig.InputsYml = string(inputsYaml)

	helmValues, err := yaml.Marshal(helmValuesYaml)
	if err != nil {
		return fmt.Errorf("marshal values.yml: %w", err)
	}

	if err = p.fh.Write(filepath.Join(dir, "filebeat", "values.yml"), helmValues, file.OptMkdirAll); err != nil {
		return fmt.Errorf("write values.yml: %w", err)
	}

	return nil
}

// FilebeatTemplateData is template data.
type FilebeatTemplateData struct {
	LogstashHost string
}

// FilebeatHelmValues represents the Helm values.yml.
type FilebeatHelmValues struct {
	Image     string `yaml:"image"`
	ImageTag  string `yaml:"imageTag"`
	Daemonset struct {
		Enabled        bool `yaml:"enabled"`
		FilebeatConfig struct {
			FilebeatYml string `yaml:"filebeat.yml"`
			InputsYml   string `yaml:"inputs.yml"`
		} `yaml:"filebeatConfig"`
		ExtraEnvs    []interface{} `yaml:"extraEnvs"`
		SecretMounts []interface{} `yaml:"secretMounts"`
		Tolerations  []struct {
			Key      string `yaml:"key"`
			Operator string `yaml:"operator"`
			Effect   string `yaml:"effect"`
		} `yaml:"tolerations"`
		SecurityContext struct {
			Privileged bool `yaml:"privileged"`
			RunAsUser  int  `yaml:"runAsUser"`
		} `yaml:"securityContext"`
		ExtraVolumeMounts []struct {
			Name      string `yaml:"name"`
			MountPath string `yaml:"mountPath"`
			ReadOnly  bool   `yaml:"readOnly"`
		} `yaml:"extraVolumeMounts"`
		ExtraVolumes []struct {
			Name     string `yaml:"name"`
			HostPath struct {
				Path string `yaml:"path"`
				Type string `yaml:"type"`
			} `yaml:"hostPath"`
		} `yaml:"extraVolumes"`
	} `yaml:"daemonset"`
}
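The preparer patches the embedded Helm values via an unmarshal-modify-marshal roundtrip. A standalone sketch of that pattern with a simplified struct and hypothetical values:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// values mirrors just the slice of the Filebeat Helm values that gets patched.
type values struct {
	Daemonset struct {
		FilebeatConfig struct {
			FilebeatYml string `yaml:"filebeat.yml"`
		} `yaml:"filebeatConfig"`
	} `yaml:"daemonset"`
}

func main() {
	raw := []byte("daemonset:\n  filebeatConfig:\n    filebeat.yml: \"\"\n")

	// Unmarshal the raw values, patch one field, and marshal back.
	var v values
	if err := yaml.Unmarshal(raw, &v); err != nil {
		panic(err)
	}
	v.Daemonset.FilebeatConfig.FilebeatYml = "output.logstash:\n  hosts: [\"logstash-logstash:5045\"]\n"

	out, err := yaml.Marshal(v)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}
```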
hack/logcollector/internal/logstash.go (new file, 146 lines)
@@ -0,0 +1,146 @@
/*
Copyright (c) Edgeless Systems GmbH

SPDX-License-Identifier: AGPL-3.0-only
*/
package internal

import (
	"embed"
	"fmt"
	"path/filepath"

	"github.com/edgelesssys/constellation/v2/debugd/logstash"
	"github.com/edgelesssys/constellation/v2/internal/file"
	"github.com/spf13/afero"
	"gopkg.in/yaml.v3"
)

var (
	//go:embed templates/logstash/*
	logstashHelmAssets embed.FS

	logstashAssets = logstash.Assets
)

const (
	openSearchHost = "https://search-e2e-logs-y46renozy42lcojbvrt3qq7csm.eu-central-1.es.amazonaws.com:443"
)

// LogstashPreparer prepares the Logstash Helm chart.
type LogstashPreparer struct {
	fh          file.Handler
	fields      map[string]string
	indexPrefix string
	username    string
	password    string
	port        int
	templatePreparer
}

// NewLogstashPreparer returns a new LogstashPreparer.
func NewLogstashPreparer(fields map[string]string, username, password, indexPrefix string, port int) *LogstashPreparer {
	return &LogstashPreparer{
		username:    username,
		password:    password,
		indexPrefix: indexPrefix,
		fields:      fields,
		fh:          file.NewHandler(afero.NewOsFs()),
		port:        port,
	}
}

// Prepare prepares the Logstash Helm chart by templating the required files and placing them in the specified directory.
func (p *LogstashPreparer) Prepare(dir string) error {
	templatedPipelineConf, err := p.template(logstashAssets, "templates/pipeline.conf", pipelineConfTemplate{
		InfoMap:     p.fields,
		Host:        openSearchHost,
		IndexPrefix: p.indexPrefix,
		Credentials: Credentials{
			Username: p.username,
			Password: p.password,
		},
		Port: p.port,
	})
	if err != nil {
		return fmt.Errorf("template pipeline.conf: %w", err)
	}

	logstashYaml, err := logstashAssets.ReadFile("config/logstash.yml")
	if err != nil {
		return fmt.Errorf("read logstash.yml: %w", err)
	}

	log4jProperties, err := logstashAssets.ReadFile("config/log4j2.properties")
	if err != nil {
		return fmt.Errorf("read log4j2.properties: %w", err)
	}

	rawHelmValues, err := logstashHelmAssets.ReadFile("templates/logstash/values.yml")
	if err != nil {
		return fmt.Errorf("read values.yml: %w", err)
	}

	helmValuesYaml := &LogstashHelmValues{}
	if err := yaml.Unmarshal(rawHelmValues, helmValuesYaml); err != nil {
		return fmt.Errorf("unmarshal values.yml: %w", err)
	}

	helmValuesYaml.LogstashConfig.LogstashYml = helmValuesYaml.LogstashConfig.LogstashYml + string(logstashYaml)
	helmValuesYaml.LogstashConfig.Log4J2Properties = string(log4jProperties)
	helmValuesYaml.LogstashPipeline.LogstashConf = templatedPipelineConf.String()
	helmValuesYaml.Service.Ports[0].Port = p.port
	helmValuesYaml.Service.Ports[0].TargetPort = p.port

	helmValues, err := yaml.Marshal(helmValuesYaml)
	if err != nil {
		return fmt.Errorf("marshal values.yml: %w", err)
	}

	if err = p.fh.Write(filepath.Join(dir, "logstash", "values.yml"), helmValues, file.OptMkdirAll); err != nil {
		return fmt.Errorf("write values.yml: %w", err)
	}

	return nil
}

// LogstashHelmValues represents the values.yml file for the Logstash Helm chart.
type LogstashHelmValues struct {
	Image          string `yaml:"image"`
	ImageTag       string `yaml:"imageTag"`
	LogstashConfig struct {
		LogstashYml      string `yaml:"logstash.yml"`
		Log4J2Properties string `yaml:"log4j2.properties"`
	} `yaml:"logstashConfig"`
	LogstashPipeline struct {
		LogstashConf string `yaml:"logstash.conf"`
	} `yaml:"logstashPipeline"`
	Service struct {
		Ports []struct {
			Name       string `yaml:"name"`
			Port       int    `yaml:"port"`
			Protocol   string `yaml:"protocol"`
			TargetPort int    `yaml:"targetPort"`
		} `yaml:"ports"`
	} `yaml:"service"`
	Tolerations []struct {
		Key      string `yaml:"key"`
		Operator string `yaml:"operator"`
		Effect   string `yaml:"effect"`
	} `yaml:"tolerations"`
}

// pipelineConfTemplate is template data.
type pipelineConfTemplate struct {
	InfoMap     map[string]string
	Host        string
	IndexPrefix string
	Credentials Credentials
	Port        int
}

// Credentials is template data.
type Credentials struct {
	Username string
	Password string
}
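A hypothetical invocation of the preparer, mirroring how cmd/template.go wires it up; the credentials and output directory are placeholders, and the snippet must live under hack/logcollector since the package is internal:

```go
package main

import (
	"log"

	"github.com/edgelesssys/constellation/v2/hack/logcollector/internal"
)

func main() {
	p := internal.NewLogstashPreparer(
		map[string]string{"deployment-type": "k8s"}, // extra fields
		"opensearch-user",     // placeholder credentials
		"opensearch-password", // placeholder credentials
		"systemd-logs",        // index prefix
		5045,                  // logstash port
	)
	// Writes logstash/values.yml below the given directory.
	if err := p.Prepare("/tmp/logcollection"); err != nil {
		log.Fatal(err)
	}
}
```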
hack/logcollector/internal/prepare.go (new file, 30 lines)
@@ -0,0 +1,30 @@
/*
Copyright (c) Edgeless Systems GmbH

SPDX-License-Identifier: AGPL-3.0-only
*/
package internal

import (
	"bytes"
	"embed"
	"fmt"
	"text/template"
)

type templatePreparer struct{}

func (p templatePreparer) template(fs embed.FS, templateFile string, templateData any) (*bytes.Buffer, error) {
	templates, err := template.ParseFS(fs, templateFile)
	if err != nil {
		return nil, fmt.Errorf("parse templates: %w", err)
	}

	buf := bytes.NewBuffer(nil)

	if err = templates.Execute(buf, templateData); err != nil {
		return nil, fmt.Errorf("execute template: %w", err)
	}

	return buf, nil
}
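`template.ParseFS` accepts any `fs.FS`, which is what lets this helper work over the embedded template trees. A standalone sketch using `fstest.MapFS` in place of `embed.FS` (the template content is illustrative):

```go
package main

import (
	"bytes"
	"fmt"
	"testing/fstest"
	"text/template"
)

func main() {
	// Stand-in for the embedded template files; ParseFS treats embed.FS
	// and this test FS identically.
	assets := fstest.MapFS{
		"templates/pipeline.conf": {Data: []byte("input { beats { port => {{ .Port }} } }\n")},
	}

	tmpl, err := template.ParseFS(assets, "templates/pipeline.conf")
	if err != nil {
		panic(err)
	}

	buf := &bytes.Buffer{}
	if err := tmpl.Execute(buf, struct{ Port int }{5045}); err != nil {
		panic(err)
	}
	fmt.Print(buf.String())
}
```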
hack/logcollector/internal/templates/filebeat/values.yml (new file, 40 lines)
@@ -0,0 +1,40 @@
image: ghcr.io/edgelesssys/beats/filebeat-oss
imageTag: 8.10.0@sha256:655b2162104dc00b912a1bc91e6c437f4baf21b5aaef3464a1ac3e47251d8e01

daemonset:
  enabled: true
  filebeatConfig:
    filebeat.yml: ""
    inputs.yml: ""
  extraEnvs: []
  secretMounts: []
  securityContext:
    runAsUser: 0
    privileged: true
  tolerations:
    - key: node-role.kubernetes.io/control-plane
      operator: Exists
      effect: NoSchedule
  extraVolumeMounts:
    - name: runsystemd
      mountPath: /run/systemd
      readOnly: true
    - name: machine-id
      mountPath: /etc/machine-id
      readOnly: true
    - name: runlogjournal
      mountPath: /run/log/journal
      readOnly: true
  extraVolumes:
    - name: runsystemd
      hostPath:
        path: /run/systemd
        type: ""
    - name: machine-id
      hostPath:
        path: /etc/machine-id
        type: ""
    - name: runlogjournal
      hostPath:
        path: /run/log/journal
        type: ""
hack/logcollector/internal/templates/logstash/values.yml (new file, 19 lines)
@@ -0,0 +1,19 @@
image: docker.io/opensearchproject/logstash-oss-with-opensearch-output-plugin
imageTag: 8.6.1@sha256:dc060b364d600858fca6b86f9217bdd0c28ecbc5c1a0636538dd6d946422a421

logstashConfig:
  logstash.yml: |
    # explicitly add host as per https://github.com/elastic/helm-charts/blob/2fd64d0af65f14df7aa01da591919460dabac4b3/logstash/values.yaml#L7
    http.host: 0.0.0.0

service:
  ports:
    - name: beats
      port: 5045
      protocol: TCP
      targetPort: 5045

tolerations:
  - key: node-role.kubernetes.io/control-plane
    operator: Exists
    effect: NoSchedule
hack/logcollector/main.go (new file, 19 lines)
@@ -0,0 +1,19 @@
/*
Copyright (c) Edgeless Systems GmbH

SPDX-License-Identifier: AGPL-3.0-only
*/

package main

import (
	"os"

	"github.com/edgelesssys/constellation/v2/hack/logcollector/cmd"
)

func main() {
	if err := cmd.Execute(); err != nil {
		os.Exit(1)
	}
}
|
@ -113,9 +113,9 @@ const (
|
||||
// NodeMaintenanceOperatorImage is the image for the node maintenance operator.
|
||||
NodeMaintenanceOperatorImage = "quay.io/medik8s/node-maintenance-operator:v0.15.0@sha256:8cb8dad93283268282c30e75c68f4bd76b28def4b68b563d2f9db9c74225d634" // renovate:container
|
||||
// LogstashImage is the container image of logstash, used for log collection by debugd.
|
||||
LogstashImage = "ghcr.io/edgelesssys/constellation/logstash-debugd:v2.7.0-pre.0.20230405123345-6bf3c63115a5@sha256:1e2c396538be7571138272f8a54e3412d4ff91ee370880f89894501a2555706a" // renovate:container
|
||||
LogstashImage = "ghcr.io/edgelesssys/constellation/logstash-debugd:v2.9.0-pre.0.20230710124918-df09e04e0b4c@sha256:f3bad95b8f85801d61c7791a46488d75d942ef610f289d3362cfe09505cef6c8" // renovate:container
|
||||
// FilebeatImage is the container image of filebeat, used for log collection by debugd.
|
||||
FilebeatImage = "ghcr.io/edgelesssys/constellation/filebeat-debugd:v2.7.0-pre.0.20230405123345-6bf3c63115a5@sha256:abd739853af4981c3a4b338bb3a27433284525d7ebdb84adfc77f1873c41de93" // renovate:container
|
||||
FilebeatImage = "ghcr.io/edgelesssys/constellation/filebeat-debugd:v2.9.0-pre.0.20230710124918-df09e04e0b4c@sha256:438be5705d1886a5d85724cf55f5d7f05c240b4bd4680eff5f532fc346ad02ae" // renovate:container
|
||||
|
||||
// currently supported versions.
|
||||
//nolint:revive
|
||||
|