constellation/debugd/logstash/templates/pipeline.conf
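
# Logstash pipeline template for Constellation's debugd log collection.
# The {{ ... }} placeholders are Go template values (port, metadata, OpenSearch
# host, and credentials) filled in when the template is rendered.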

input {
  beats {
    host => "0.0.0.0"
    port => {{ .Port }}
  }
}
filter {
  mutate {
    # Remove some fields that are not needed.
    remove_field => [
      "[agent]",
      "[journald]",
      "[syslog]",
      "[systemd][invocation_id]",
      "[event][original]",
      "[log][offset]",
      "[log][syslog]"
    ]
    # Tag with the provided metadata.
    add_field => {
      {{ range $key, $value := .InfoMap }}
      "[metadata][{{ $key }}]" => "{{ $value }}"
      {{ end }}
    }
  }
  # Parse structured logs for the following systemd units.
  if [systemd][unit] in ["bootstrapper.service", "constellation-bootstrapper.service"] {
    # skip_on_invalid_json below does not skip the whole filter, so use a cheap
    # regex check as a workaround. See:
    # https://discuss.elastic.co/t/skip-on-invalid-json-skipping-all-filters/215195
    # https://discuss.elastic.co/t/looking-for-a-way-to-detect-json/102263
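    # Illustrative example (field names taken from the filters below): a message such as
    #   {"ts":"2023-01-01T00:00:00Z","msg":"node joined","peer.address":"10.0.0.1:9000"}
    # passes the check below; its "msg" then replaces [message] and "ts" is dropped.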
    if [message] =~ "\A\{.+\}\z" {
      json {
        source => "message"
        target => "logs"
        skip_on_invalid_json => true
      }
      mutate {
        replace => {
          "message" => "%{[logs][msg]}"
        }
        remove_field => [
          "[logs][msg]",
          "[logs][ts]"
        ]
      }
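      # Replace the dot in the "peer.address" field name so it is not misread
      # as nesting by downstream consumers.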
      de_dot {
        fields => ["[logs][peer.address]"]
      }
    }
  }
}
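# Ship the processed events to the configured OpenSearch host, one index per day.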
output {
  opensearch {
    hosts => "{{ .Host }}"
    index => "systemd-logs-%{+YYYY.MM.dd}"
    user => "{{ .Credentials.Username }}"
    password => "{{ .Credentials.Password }}"
    ssl => true
    ssl_certificate_verification => true
  }
}