From 6edae9663349f74e857b6f19ea335ef6a2f26cc5 Mon Sep 17 00:00:00 2001 From: Nick Poole <56344524+NickPoole@users.noreply.github.com> Date: Mon, 30 Sep 2024 12:45:06 +0100 Subject: [PATCH] Initial commit --- .env | 42 ++ .gitattributes | 2 + .github/ISSUE_TEMPLATE/config.yml | 11 + .github/ISSUE_TEMPLATE/issue_report.md | 87 +++ .github/dependabot.yml | 14 + .github/workflows/ci.yml | 200 +++++++ .github/workflows/docs.yml | 29 + .github/workflows/lint/markdown.yaml | 152 ++++++ .github/workflows/mlc_config.json | 5 + .../scripts/disable-disk-alloc-decider.sh | 31 ++ .github/workflows/scripts/lib/testing.sh | 138 +++++ .github/workflows/scripts/run-tests-core.sh | 106 ++++ .../scripts/run-tests-enterprise-search.sh | 32 ++ .../workflows/scripts/run-tests-filebeat.sh | 63 +++ .github/workflows/scripts/run-tests-fleet.sh | 113 ++++ .../workflows/scripts/run-tests-heartbeat.sh | 63 +++ .../workflows/scripts/run-tests-metricbeat.sh | 66 +++ .github/workflows/spam-issue-close.yml | 57 ++ .github/workflows/update-merge.yml | 42 ++ .github/workflows/update.yml | 103 ++++ LICENSE | 21 + README.md | 498 ++++++++++++++++++ docker-compose.yml | 112 ++++ elasticsearch/.dockerignore | 6 + elasticsearch/Dockerfile | 7 + elasticsearch/config/elasticsearch.yml | 12 + extensions/README.md | 3 + extensions/curator/.dockerignore | 6 + extensions/curator/Dockerfile | 9 + extensions/curator/README.md | 20 + extensions/curator/config/curator.yml | 13 + .../config/delete_log_files_curator.yml | 21 + extensions/curator/curator-compose.yml | 14 + extensions/enterprise-search/.dockerignore | 6 + extensions/enterprise-search/Dockerfile | 4 + extensions/enterprise-search/README.md | 144 +++++ .../config/enterprise-search.yml | 28 + .../enterprise-search-compose.yml | 18 + extensions/filebeat/.dockerignore | 6 + extensions/filebeat/Dockerfile | 3 + extensions/filebeat/README.md | 42 ++ extensions/filebeat/config/filebeat.yml | 54 ++ extensions/filebeat/filebeat-compose.yml | 33 ++ 
extensions/fleet/.dockerignore | 6 + extensions/fleet/Dockerfile | 8 + extensions/fleet/README.md | 62 +++ extensions/fleet/agent-apmserver-compose.yml | 43 ++ extensions/fleet/fleet-compose.yml | 45 ++ extensions/heartbeat/.dockerignore | 6 + extensions/heartbeat/Dockerfile | 3 + extensions/heartbeat/README.md | 41 ++ extensions/heartbeat/config/heartbeat.yml | 40 ++ extensions/heartbeat/heartbeat-compose.yml | 22 + extensions/metricbeat/.dockerignore | 6 + extensions/metricbeat/Dockerfile | 3 + extensions/metricbeat/README.md | 49 ++ extensions/metricbeat/config/metricbeat.yml | 72 +++ extensions/metricbeat/metricbeat-compose.yml | 45 ++ kibana/.dockerignore | 6 + kibana/Dockerfile | 7 + kibana/config/kibana.yml | 99 ++++ logstash/.dockerignore | 6 + logstash/Dockerfile | 7 + logstash/config/logstash.yml | 7 + logstash/pipeline/logstash.conf | 19 + setup/.dockerignore | 9 + setup/Dockerfile | 6 + setup/entrypoint.sh | 119 +++++ setup/lib.sh | 240 +++++++++ setup/roles/filebeat_writer.json | 20 + setup/roles/heartbeat_writer.json | 18 + setup/roles/logstash_writer.json | 33 ++ setup/roles/metricbeat_writer.json | 19 + 73 files changed, 3502 insertions(+) create mode 100644 .env create mode 100644 .gitattributes create mode 100644 .github/ISSUE_TEMPLATE/config.yml create mode 100644 .github/ISSUE_TEMPLATE/issue_report.md create mode 100644 .github/dependabot.yml create mode 100644 .github/workflows/ci.yml create mode 100644 .github/workflows/docs.yml create mode 100644 .github/workflows/lint/markdown.yaml create mode 100644 .github/workflows/mlc_config.json create mode 100755 .github/workflows/scripts/disable-disk-alloc-decider.sh create mode 100755 .github/workflows/scripts/lib/testing.sh create mode 100755 .github/workflows/scripts/run-tests-core.sh create mode 100755 .github/workflows/scripts/run-tests-enterprise-search.sh create mode 100755 .github/workflows/scripts/run-tests-filebeat.sh create mode 100755 .github/workflows/scripts/run-tests-fleet.sh create 
mode 100755 .github/workflows/scripts/run-tests-heartbeat.sh create mode 100755 .github/workflows/scripts/run-tests-metricbeat.sh create mode 100644 .github/workflows/spam-issue-close.yml create mode 100644 .github/workflows/update-merge.yml create mode 100644 .github/workflows/update.yml create mode 100644 LICENSE create mode 100644 README.md create mode 100644 docker-compose.yml create mode 100644 elasticsearch/.dockerignore create mode 100644 elasticsearch/Dockerfile create mode 100644 elasticsearch/config/elasticsearch.yml create mode 100644 extensions/README.md create mode 100644 extensions/curator/.dockerignore create mode 100644 extensions/curator/Dockerfile create mode 100644 extensions/curator/README.md create mode 100644 extensions/curator/config/curator.yml create mode 100644 extensions/curator/config/delete_log_files_curator.yml create mode 100644 extensions/curator/curator-compose.yml create mode 100644 extensions/enterprise-search/.dockerignore create mode 100644 extensions/enterprise-search/Dockerfile create mode 100644 extensions/enterprise-search/README.md create mode 100644 extensions/enterprise-search/config/enterprise-search.yml create mode 100644 extensions/enterprise-search/enterprise-search-compose.yml create mode 100644 extensions/filebeat/.dockerignore create mode 100644 extensions/filebeat/Dockerfile create mode 100644 extensions/filebeat/README.md create mode 100644 extensions/filebeat/config/filebeat.yml create mode 100644 extensions/filebeat/filebeat-compose.yml create mode 100644 extensions/fleet/.dockerignore create mode 100644 extensions/fleet/Dockerfile create mode 100644 extensions/fleet/README.md create mode 100644 extensions/fleet/agent-apmserver-compose.yml create mode 100644 extensions/fleet/fleet-compose.yml create mode 100644 extensions/heartbeat/.dockerignore create mode 100644 extensions/heartbeat/Dockerfile create mode 100644 extensions/heartbeat/README.md create mode 100644 extensions/heartbeat/config/heartbeat.yml create 
mode 100644 extensions/heartbeat/heartbeat-compose.yml create mode 100644 extensions/metricbeat/.dockerignore create mode 100644 extensions/metricbeat/Dockerfile create mode 100644 extensions/metricbeat/README.md create mode 100644 extensions/metricbeat/config/metricbeat.yml create mode 100644 extensions/metricbeat/metricbeat-compose.yml create mode 100644 kibana/.dockerignore create mode 100644 kibana/Dockerfile create mode 100644 kibana/config/kibana.yml create mode 100644 logstash/.dockerignore create mode 100644 logstash/Dockerfile create mode 100644 logstash/config/logstash.yml create mode 100644 logstash/pipeline/logstash.conf create mode 100644 setup/.dockerignore create mode 100644 setup/Dockerfile create mode 100755 setup/entrypoint.sh create mode 100644 setup/lib.sh create mode 100644 setup/roles/filebeat_writer.json create mode 100644 setup/roles/heartbeat_writer.json create mode 100644 setup/roles/logstash_writer.json create mode 100644 setup/roles/metricbeat_writer.json diff --git a/.env b/.env new file mode 100644 index 0000000..576f904 --- /dev/null +++ b/.env @@ -0,0 +1,42 @@ +ELASTIC_VERSION=8.15.1 + +## Passwords for stack users +# + +# User 'elastic' (built-in) +# +# Superuser role, full access to cluster management and data indices. +# https://www.elastic.co/guide/en/elasticsearch/reference/current/built-in-users.html +ELASTIC_PASSWORD='changeme' + +# User 'logstash_internal' (custom) +# +# The user Logstash uses to connect and send data to Elasticsearch. +# https://www.elastic.co/guide/en/logstash/current/ls-security.html +LOGSTASH_INTERNAL_PASSWORD='changeme' + +# User 'kibana_system' (built-in) +# +# The user Kibana uses to connect and communicate with Elasticsearch. +# https://www.elastic.co/guide/en/elasticsearch/reference/current/built-in-users.html +KIBANA_SYSTEM_PASSWORD='changeme' + +# Users 'metricbeat_internal', 'filebeat_internal' and 'heartbeat_internal' (custom) +# +# The users Beats use to connect and send data to Elasticsearch. 
+# https://www.elastic.co/guide/en/beats/metricbeat/current/feature-roles.html +METRICBEAT_INTERNAL_PASSWORD='' +FILEBEAT_INTERNAL_PASSWORD='' +HEARTBEAT_INTERNAL_PASSWORD='' + +# User 'monitoring_internal' (custom) +# +# The user Metricbeat uses to collect monitoring data from stack components. +# https://www.elastic.co/guide/en/elasticsearch/reference/current/how-monitoring-works.html +MONITORING_INTERNAL_PASSWORD='' + +# User 'beats_system' (built-in) +# +# The user the Beats use when storing monitoring information in Elasticsearch. +# https://www.elastic.co/guide/en/elasticsearch/reference/current/built-in-users.html +BEATS_SYSTEM_PASSWORD='' diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..2858dda --- /dev/null +++ b/.gitattributes @@ -0,0 +1,2 @@ +# Declare files that will always have LF line endings on checkout. +*.sh text eol=lf \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 0000000..e30f9d2 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,11 @@ +blank_issues_enabled: false +contact_links: +- name: Discuss the Elastic Stack + url: https://discuss.elastic.co + about: Please ask questions related to the usage of Elastic products in those forums. +- name: Docker Community Forums + url: https://forums.docker.com + about: Please ask questions related to the usage of Docker products in those forums. +- name: docker-elk Gitter chat room + url: https://app.gitter.im/#/room/#deviantony_docker-elk:gitter.im + about: General questions regarding this project can also be asked in the chat. diff --git a/.github/ISSUE_TEMPLATE/issue_report.md b/.github/ISSUE_TEMPLATE/issue_report.md new file mode 100644 index 0000000..2d0841d --- /dev/null +++ b/.github/ISSUE_TEMPLATE/issue_report.md @@ -0,0 +1,87 @@ +--- +name: Issue report +about: Report a problem with the docker-elk integration or its documentation. 
+--- + + + + +### Problem description + + + +### Extra information + +#### Stack configuration + + + +#### Docker setup + + + +```console +$ docker version + +[OUTPUT HERE] +``` + + + +```console +$ docker-compose version + +[OUTPUT HERE] +``` + +#### Container logs + + + +```console +$ docker-compose logs + +[OUTPUT HERE] +``` diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000..97bdac6 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,14 @@ +# Dependabot configuration +# +# For more information, please refer to: +# https://docs.github.com/en/code-security/dependabot/dependabot-version-updates + +version: 2 + +updates: + +# Maintain dependencies for GitHub Actions +- package-ecosystem: github-actions + directory: / + schedule: + interval: weekly diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..594eb3a --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,200 @@ +name: CI + +on: + push: + branches: [ main ] + pull_request: + branches: [ main ] + +jobs: + + test: + name: Test suite + # List of supported runners: + # https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners + runs-on: ubuntu-22.04 + + env: + COMPOSE_PROJECT_NAME: docker-elk + + steps: + - uses: actions/checkout@v4 + + ##################################################### + # # + # Install all dependencies required by test suites. 
# + # # + ##################################################### + + - name: Pre-build container images + run: >- + docker compose + -f docker-compose.yml + -f extensions/fleet/fleet-compose.yml + -f extensions/fleet/agent-apmserver-compose.yml + -f extensions/metricbeat/metricbeat-compose.yml + -f extensions/filebeat/filebeat-compose.yml + -f extensions/heartbeat/heartbeat-compose.yml + -f extensions/enterprise-search/enterprise-search-compose.yml + build + + ######################################################## + # # + # Ensure §"Initial setup" of the README remains valid. # + # # + ######################################################## + + - name: Set password of every built-in user to 'testpasswd' + run: >- + sed -i + -e 's/\(ELASTIC_PASSWORD=\)'\''changeme'\''/\1testpasswd/g' + -e 's/\(LOGSTASH_INTERNAL_PASSWORD=\)'\''changeme'\''/\1testpasswd/g' + -e 's/\(KIBANA_SYSTEM_PASSWORD=\)'\''changeme'\''/\1testpasswd/g' + -e 's/\(METRICBEAT_INTERNAL_PASSWORD=\)'\'\''/\1testpasswd/g' + -e 's/\(FILEBEAT_INTERNAL_PASSWORD=\)'\'\''/\1testpasswd/g' + -e 's/\(HEARTBEAT_INTERNAL_PASSWORD=\)'\'\''/\1testpasswd/g' + -e 's/\(MONITORING_INTERNAL_PASSWORD=\)'\'\''/\1testpasswd/g' + -e 's/\(BEATS_SYSTEM_PASSWORD=\)'\'\''/\1testpasswd/g' + .env + + - name: Set up users and roles + run: docker compose up setup + + ############################# + # # + # Test core and extensions. # + # # + ############################# + + # Elasticsearch's high disk watermark gets regularly exceeded on GitHub Actions runners. 
+ # https://www.elastic.co/guide/en/elasticsearch/reference/8.10/fix-watermark-errors.html + - name: Disable Elasticsearch disk allocation decider + run: .github/workflows/scripts/disable-disk-alloc-decider.sh + + # + # Core components: Elasticsearch, Logstash, Kibana + # + + - name: Execute core test suite + run: | + docker compose up -d + .github/workflows/scripts/run-tests-core.sh + # next steps don't need Logstash + docker compose stop logstash + + # + # Fleet + # + + - name: Execute Fleet test suite + run: | + docker compose -f docker-compose.yml -f extensions/fleet/fleet-compose.yml -f extensions/fleet/agent-apmserver-compose.yml up --remove-orphans -d fleet-server apm-server + .github/workflows/scripts/run-tests-fleet.sh + + # + # Metricbeat + # + + - name: Execute Metricbeat test suite + run: | + docker compose -f docker-compose.yml -f extensions/metricbeat/metricbeat-compose.yml up --remove-orphans -d metricbeat + .github/workflows/scripts/run-tests-metricbeat.sh + + # + # Filebeat + # + + - name: Execute Filebeat test suite + run: | + docker compose -f docker-compose.yml -f extensions/filebeat/filebeat-compose.yml up --remove-orphans -d filebeat + .github/workflows/scripts/run-tests-filebeat.sh + + # + # Heartbeat + # + + - name: Execute Heartbeat test suite + run: | + docker compose -f docker-compose.yml -f extensions/heartbeat/heartbeat-compose.yml up --remove-orphans -d heartbeat + .github/workflows/scripts/run-tests-heartbeat.sh + + # + # Enterprise Search + # + + - name: Execute Enterprise Search test suite + run: | + + # Set mandatory Elasticsearch settings + + sed -i '$ a xpack.security.authc.api_key.enabled: true' elasticsearch/config/elasticsearch.yml + + # Restart Elasticsearch for changes to take effect + + docker compose restart elasticsearch + + # Run Enterprise Search and execute tests + + sed -i 's/\(secret_management.encryption_keys:\)/\1 [test-encrypt]/g' extensions/enterprise-search/config/enterprise-search.yml + + docker compose -f 
docker-compose.yml -f extensions/enterprise-search/enterprise-search-compose.yml up --remove-orphans -d enterprise-search + .github/workflows/scripts/run-tests-enterprise-search.sh + + # Revert changes to Elasticsearch configuration + + sed -i '/xpack.security.authc.api_key.enabled: true/d' elasticsearch/config/elasticsearch.yml + docker compose restart elasticsearch + + - name: Collect troubleshooting data + id: debug-data + if: failure() + run: | + declare debug_data_dir="$(mktemp -d)" + + docker compose \ + -f docker-compose.yml \ + -f extensions/fleet/fleet-compose.yml \ + -f extensions/fleet/agent-apmserver-compose.yml \ + -f extensions/metricbeat/metricbeat-compose.yml \ + -f extensions/filebeat/filebeat-compose.yml \ + -f extensions/heartbeat/heartbeat-compose.yml \ + -f extensions/enterprise-search/enterprise-search-compose.yml \ + ps >"$debug_data_dir"/docker_ps.log + + docker compose \ + -f docker-compose.yml \ + -f extensions/fleet/fleet-compose.yml \ + -f extensions/fleet/agent-apmserver-compose.yml \ + -f extensions/metricbeat/metricbeat-compose.yml \ + -f extensions/filebeat/filebeat-compose.yml \ + -f extensions/heartbeat/heartbeat-compose.yml \ + -f extensions/enterprise-search/enterprise-search-compose.yml \ + logs >"$debug_data_dir"/docker_logs.log + + echo "path=${debug_data_dir}" >>"$GITHUB_OUTPUT" + + - name: Upload collected troubleshooting data + if: always() && steps.debug-data.outputs.path + uses: actions/upload-artifact@v4 + with: + name: debug-data + path: ${{ steps.debug-data.outputs.path }}/*.* + + ############## + # # + # Tear down. 
# + # # + ############## + + - name: Terminate all components + if: always() + run: >- + docker compose + -f docker-compose.yml + -f extensions/fleet/fleet-compose.yml + -f extensions/fleet/agent-apmserver-compose.yml + -f extensions/metricbeat/metricbeat-compose.yml + -f extensions/filebeat/filebeat-compose.yml + -f extensions/heartbeat/heartbeat-compose.yml + -f extensions/enterprise-search/enterprise-search-compose.yml + down -v diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml new file mode 100644 index 0000000..2b39074 --- /dev/null +++ b/.github/workflows/docs.yml @@ -0,0 +1,29 @@ +name: Documentation + +on: + schedule: + - cron: '0 0 * * 0' # At 00:00 every Sunday + push: + branches: [ main ] + pull_request: + branches: [ main ] + +jobs: + + markdown-check: + name: Check Markdown + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: Check links + uses: gaurav-nelson/github-action-markdown-link-check@v1 + with: + config-file: .github/workflows/mlc_config.json + + - name: Lint + uses: avto-dev/markdown-lint@v1 + with: + args: '**/*.md' + config: .github/workflows/lint/markdown.yaml diff --git a/.github/workflows/lint/markdown.yaml b/.github/workflows/lint/markdown.yaml new file mode 100644 index 0000000..dd9e569 --- /dev/null +++ b/.github/workflows/lint/markdown.yaml @@ -0,0 +1,152 @@ +default: false # includes/excludes all rules by default + +# Heading levels should only increment by one level at a time +MD001: true + +# Heading style +MD003: true + +# Unordered list style +MD004: true + +# Inconsistent indentation for list items at the same level +MD005: true + +# Consider starting bulleted lists at the beginning of the line +MD006: true + +# Unordered list indentation +MD007: true + +# Trailing spaces +MD009: true + +# Hard tabs +MD010: true + +# Reversed link syntax +MD011: true + +# Multiple consecutive blank lines +MD012: true + +# Line length +MD013: + line_length: 120 + code_blocks: false + +# Dollar signs 
used before commands without showing output +MD014: false + +# No space after hash on atx style heading +MD018: true + +# Multiple spaces after hash on atx style heading +MD019: true + +# No space inside hashes on closed atx style heading +MD020: true + +# Multiple spaces inside hashes on closed atx style heading +MD021: true + +# Headings should be surrounded by blank lines +MD022: true + +# Headings must start at the beginning of the line +MD023: true + +# Multiple headings with the same content +MD024: + allow_different_nesting: true + +# Multiple top level headings in the same document +MD025: true + +# Trailing punctuation in heading +MD026: true + +# Multiple spaces after blockquote symbol +MD027: true + +# Blank line inside blockquote +MD028: false + +# Ordered list item prefix +MD029: + style: 'one' + +# Spaces after list markers +MD030: true + +# Fenced code blocks should be surrounded by blank lines +MD031: true + +# Lists should be surrounded by blank lines +MD032: true + +# Inline HTML +MD033: true + +# Bare URL used +MD034: true + +# Horizontal rule style +MD035: + style: '---' + +# Emphasis used instead of a heading +MD036: true + +# Spaces inside emphasis markers +MD037: true + +# Spaces inside code span elements +MD038: true + +# Spaces inside link text +MD039: true + +# Fenced code blocks should have a language specified +MD040: true + +# First line in file should be a top level heading +MD041: true + +# No empty links +MD042: true + +# Required heading structure +MD043: false + +# Proper names should have the correct capitalization +MD044: + names: + - docker-elk + - Elasticsearch + - Logstash + - Kibana + - Docker + - Compose + - macOS + code_blocks: false + +# Images should have alternate text (alt text) +MD045: true + +# Code block style +MD046: + style: fenced + +# Files should end with a single newline character +MD047: true + +# Code fence style +MD048: + style: 'backtick' + +# Custom rules: +CHANGELOG-RULE-001: true +CHANGELOG-RULE-002: 
true +CHANGELOG-RULE-003: true +CHANGELOG-RULE-004: true diff --git a/.github/workflows/mlc_config.json b/.github/workflows/mlc_config.json new file mode 100644 index 0000000..6b37a94 --- /dev/null +++ b/.github/workflows/mlc_config.json @@ -0,0 +1,5 @@ +{ + "ignorePatterns": [ + { "pattern": "^http:\/\/localhost:" } + ] +} diff --git a/.github/workflows/scripts/disable-disk-alloc-decider.sh b/.github/workflows/scripts/disable-disk-alloc-decider.sh new file mode 100755 index 0000000..142176c --- /dev/null +++ b/.github/workflows/scripts/disable-disk-alloc-decider.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +set -eu +set -o pipefail + + +source "${BASH_SOURCE[0]%/*}"/lib/testing.sh + + +cid_es="$(container_id elasticsearch)" +ip_es="$(service_ip elasticsearch)" + +grouplog 'Wait for readiness of Elasticsearch' +poll_ready "$cid_es" 'http://elasticsearch:9200/' --resolve "elasticsearch:9200:${ip_es}" -u 'elastic:testpasswd' +endgroup + +log 'Disabling disk allocation decider' + +declare -a put_args=( '-X' 'PUT' '--fail-with-body' '-s' '-u' 'elastic:testpasswd' + '-H' 'Content-Type: application/json' + 'http://elasticsearch:9200/_cluster/settings?pretty' + '--resolve' "elasticsearch:9200:${ip_es}" + '-d' '{"persistent":{"cluster.routing.allocation.disk.threshold_enabled":false}}' +) +declare response +declare -i exit_code=0 + +response=$(curl "${put_args[@]}") || exit_code=$? +echo "$response" + +exit $exit_code diff --git a/.github/workflows/scripts/lib/testing.sh b/.github/workflows/scripts/lib/testing.sh new file mode 100755 index 0000000..2d80992 --- /dev/null +++ b/.github/workflows/scripts/lib/testing.sh @@ -0,0 +1,138 @@ +#!/usr/bin/env bash + +# Log a message. +function log { + echo -e "\n[+] $1\n" +} + +# Log an error. +function err { + echo -e "\n[x] $1\n" >&2 +} + +# Start an expandable group in the GitHub Action log. 
+# https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#grouping-log-lines +function grouplog { + echo "::group::$1" +} + +# End the current expandable group in the GitHub Action log. +function endgroup { + echo '::endgroup::' +} + +# Return the ID of the container running the given service. +function container_id { + local svc=$1 + + local label="com.docker.compose.service=${svc}" + + local cid + + local -i was_retried=0 + + # retry for max 60s (30*2s) + for _ in $(seq 1 30); do + cid="$(docker container ls -aq -f label="$label")" + if [ -n "$cid" ]; then + break + fi + + was_retried=1 + echo -n '.' >&2 + sleep 2 + done + if ((was_retried)); then + # flush stderr, important in non-interactive environments (CI) + echo >&2 + fi + + if [ -z "${cid:-}" ]; then + err "Timed out waiting for creation of container with label ${label}" + return 1 + fi + + echo "$cid" +} + +# Return the IP address at which a service can be reached. +# In Compose mode, returns the container's IP. +function service_ip { + local svc=$1 + + local ip + + local cid + cid="$(container_id "$svc")" + + local ip + + local -i was_retried=0 + + # retry for max 10s (5*2s) + for _ in $(seq 1 5); do + ip="$(docker container inspect "$cid" --format '{{ (index .NetworkSettings.Networks "docker-elk_elk").IPAddress }}')" + if [ -n "$ip" ]; then + break + fi + + was_retried=1 + echo -n '.' >&2 + sleep 2 + done + if ((was_retried)); then + # flush stderr, important in non-interactive environments (CI) + echo >&2 + fi + + if [ -z "${ip:-}" ]; then + err "Container ${cid} has no IP address" + return 1 + fi + + echo "$ip" +} + +# Poll the given service at the given port:/path until it responds with HTTP code 200. 
+function poll_ready { + local cid=$1 + local url=$2 + + local -a args=( '-s' '-D-' '-m3' '-w' '%{http_code}' "$url" ) + if [ "$#" -ge 3 ]; then + args+=( ${@:3} ) + fi + + echo "curl arguments: ${args[*]}" + + local -i result=1 + local output + + local -i was_retried=0 + + # retry for max 300s (60*5s) + for _ in $(seq 1 60); do + if [[ $(docker container inspect "$cid" --format '{{ .State.Status}}') == 'exited' ]]; then + err "Container exited ($(docker container inspect "$cid" --format '{{ .Name }}'))" + return 1 + fi + + output="$(curl "${args[@]}" || true)" + if [ "${output: -3}" -eq 200 ]; then + result=0 + break + fi + + was_retried=1 + echo -n 'x' >&2 + sleep 5 + done + if ((was_retried)); then + # flush stderr, important in non-interactive environments (CI) + echo >&2 + fi + + echo -e "\n${output::-3}" + + return $result +} diff --git a/.github/workflows/scripts/run-tests-core.sh b/.github/workflows/scripts/run-tests-core.sh new file mode 100755 index 0000000..e19e49f --- /dev/null +++ b/.github/workflows/scripts/run-tests-core.sh @@ -0,0 +1,106 @@ +#!/usr/bin/env bash + +set -eu +set -o pipefail + + +source "${BASH_SOURCE[0]%/*}"/lib/testing.sh + + +cid_es="$(container_id elasticsearch)" +cid_ls="$(container_id logstash)" +cid_kb="$(container_id kibana)" + +ip_es="$(service_ip elasticsearch)" +ip_ls="$(service_ip logstash)" +ip_kb="$(service_ip kibana)" + +grouplog 'Wait for readiness of Elasticsearch' +poll_ready "$cid_es" 'http://elasticsearch:9200/' --resolve "elasticsearch:9200:${ip_es}" -u 'elastic:testpasswd' +endgroup + +grouplog 'Wait for readiness of Logstash' +poll_ready "$cid_ls" 'http://logstash:9600/_node/pipelines/main?pretty' --resolve "logstash:9600:${ip_ls}" +endgroup + +grouplog 'Wait for readiness of Kibana' +poll_ready "$cid_kb" 'http://kibana:5601/api/status' --resolve "kibana:5601:${ip_kb}" -u 'kibana_system:testpasswd' +endgroup + +log 'Sending message to Logstash TCP input' + +declare -i was_retried=0 + +# retry for max 10s (5*2s) 
+for _ in $(seq 1 5); do + if echo 'dockerelk' | nc -q0 "$ip_ls" 50000; then + break + fi + + was_retried=1 + echo -n 'x' >&2 + sleep 2 +done +if ((was_retried)); then + # flush stderr, important in non-interactive environments (CI) + echo >&2 +fi + +# It might take a few seconds before the indices and alias are created, so we +# need to be resilient here. +was_retried=0 +declare -a refresh_args=( '-X' 'POST' '-s' '-w' '%{http_code}' '-u' 'elastic:testpasswd' + 'http://elasticsearch:9200/logs-generic-default/_refresh' + '--resolve' "elasticsearch:9200:${ip_es}" +) + +# retry for max 10s (10*1s) +for _ in $(seq 1 10); do + output="$(curl "${refresh_args[@]}")" + if [ "${output: -3}" -eq 200 ]; then + break + fi + + was_retried=1 + echo -n 'x' >&2 + sleep 1 +done +if ((was_retried)); then + # flush stderr, important in non-interactive environments (CI) + echo >&2 +fi + +log 'Searching message in Elasticsearch' + +# We don't know how much time it will take Logstash to create our document, so +# we need to be resilient here too. 
+was_retried=0 +declare -a search_args=( '-s' '-u' 'elastic:testpasswd' + 'http://elasticsearch:9200/logs-generic-default/_search?q=message:dockerelk&pretty' + '--resolve' "elasticsearch:9200:${ip_es}" +) +declare -i count +declare response + +# retry for max 10s (10*1s) +for _ in $(seq 1 10); do + response="$(curl "${search_args[@]}")" + count="$(jq -rn --argjson data "${response}" '$data.hits.total.value')" + if (( count )); then + break + fi + + was_retried=1 + echo -n 'x' >&2 + sleep 1 +done +if ((was_retried)); then + # flush stderr, important in non-interactive environments (CI) + echo >&2 +fi + +echo "$response" +if (( count != 1 )); then + echo "Expected 1 document, got ${count}" + exit 1 +fi diff --git a/.github/workflows/scripts/run-tests-enterprise-search.sh b/.github/workflows/scripts/run-tests-enterprise-search.sh new file mode 100755 index 0000000..50a7bc7 --- /dev/null +++ b/.github/workflows/scripts/run-tests-enterprise-search.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash + +set -eu +set -o pipefail + + +source "${BASH_SOURCE[0]%/*}"/lib/testing.sh + + +cid_es="$(container_id elasticsearch)" +cid_en="$(container_id enterprise-search)" + +ip_es="$(service_ip elasticsearch)" +ip_en="$(service_ip enterprise-search)" + +grouplog 'Wait for readiness of Elasticsearch' +poll_ready "$cid_es" 'http://elasticsearch:9200/' --resolve "elasticsearch:9200:${ip_es}" -u 'elastic:testpasswd' +endgroup + +grouplog 'Wait for readiness of Enterprise Search' +poll_ready "$cid_en" 'http://enterprise-search:3002/api/ent/v1/internal/health' --resolve "enterprise-search:3002:${ip_en}" -u 'elastic:testpasswd' +endgroup + +log 'Ensuring that App Search API keys were created in Elasticsearch' +response="$(curl 'http://elasticsearch:9200/.ent-search-actastic-app_search_api_tokens_v3/_search?q=*:*&pretty' -s --resolve "elasticsearch:9200:${ip_es}" -u elastic:testpasswd)" +echo "$response" +declare -i count +count="$(jq -rn --argjson data "${response}" '$data.hits.total.value')" +if 
(( count != 2)); then + echo "Expected search and private keys, got ${count} result(s)" + exit 1 +fi diff --git a/.github/workflows/scripts/run-tests-filebeat.sh b/.github/workflows/scripts/run-tests-filebeat.sh new file mode 100755 index 0000000..e8f035e --- /dev/null +++ b/.github/workflows/scripts/run-tests-filebeat.sh @@ -0,0 +1,63 @@ +#!/usr/bin/env bash + +set -eu +set -o pipefail + + +source "${BASH_SOURCE[0]%/*}"/lib/testing.sh + + +cid_es="$(container_id elasticsearch)" +cid_fb="$(container_id filebeat)" + +ip_es="$(service_ip elasticsearch)" +ip_fb="$(service_ip filebeat)" + +grouplog 'Wait for readiness of Elasticsearch' +poll_ready "$cid_es" 'http://elasticsearch:9200/' --resolve "elasticsearch:9200:${ip_es}" -u 'elastic:testpasswd' +endgroup + +grouplog 'Wait for readiness of Filebeat' +poll_ready "$cid_fb" 'http://filebeat:5066/?pretty' --resolve "filebeat:5066:${ip_fb}" +endgroup + +# We expect to find log entries for the 'elasticsearch' Compose service using +# the following query: +# +# agent.type:"filebeat" +# AND input.type:"container" +# AND container.name:"docker-elk-elasticsearch-1" +# +log 'Searching documents generated by Filebeat' + +declare response +declare -i count + +declare -i was_retried=0 + +# retry for max 60s (30*2s) +for _ in $(seq 1 30); do + response="$(curl 'http://elasticsearch:9200/filebeat-*/_search?q=agent.type:%22filebeat%22%20AND%20input.type:%22container%22%20AND%20container.name:%22docker-elk-elasticsearch-1%22&size=1&pretty' -s --resolve "elasticsearch:9200:${ip_es}" -u elastic:testpasswd)" + + set +u # prevent "unbound variable" if assigned value is not an integer + count="$(jq -rn --argjson data "${response}" '$data.hits.total.value')" + set -u + + if (( count > 0 )); then + break + fi + + was_retried=1 + echo -n 'x' >&2 + sleep 2 +done +if ((was_retried)); then + # flush stderr, important in non-interactive environments (CI) + echo >&2 +fi + +echo "$response" +if (( count == 0 )); then + echo 'Expected at least 1 
document' + exit 1 +fi diff --git a/.github/workflows/scripts/run-tests-fleet.sh b/.github/workflows/scripts/run-tests-fleet.sh new file mode 100755 index 0000000..ee07e5f --- /dev/null +++ b/.github/workflows/scripts/run-tests-fleet.sh @@ -0,0 +1,113 @@ +#!/usr/bin/env bash + +set -eu +set -o pipefail + + +source "${BASH_SOURCE[0]%/*}"/lib/testing.sh + + +cid_es="$(container_id elasticsearch)" +cid_fl="$(container_id fleet-server)" +cid_apm="$(container_id apm-server)" + +ip_es="$(service_ip elasticsearch)" +ip_fl="$(service_ip fleet-server)" +ip_apm="$(service_ip apm-server)" + +grouplog 'Wait for readiness of Elasticsearch' +poll_ready "$cid_es" 'http://elasticsearch:9200/' --resolve "elasticsearch:9200:${ip_es}" -u 'elastic:testpasswd' +endgroup + +grouplog 'Wait for readiness of Fleet Server' +poll_ready "$cid_fl" 'http://fleet-server:8220/api/status' --resolve "fleet-server:8220:${ip_fl}" +endgroup + +grouplog 'Wait for readiness of APM Server' +poll_ready "$cid_apm" 'http://apm-server:8200/' --resolve "apm-server:8200:${ip_apm}" +endgroup + +# We expect to find metrics entries using the following query: +# +# agent.name:"fleet-server" +# AND agent.type:"metricbeat" +# AND event.module:"system" +# AND event.dataset:"system.cpu" +# AND metricset.name:"cpu" +# +log 'Searching a system document generated by Fleet Server' + +declare response +declare -i count + +declare -i was_retried=0 + +# retry for max 60s (30*2s) +for _ in $(seq 1 30); do + response="$(curl 'http://elasticsearch:9200/metrics-system.cpu-default/_search?q=agent.name:%22fleet-server%22%20AND%20agent.type:%22metricbeat%22%20AND%20event.module:%22system%22%20AND%20event.dataset:%22system.cpu%22%20AND%20metricset.name:%22cpu%22&size=1&pretty' -s --resolve "elasticsearch:9200:${ip_es}" -u elastic:testpasswd)" + + set +u # prevent "unbound variable" if assigned value is not an integer + count="$(jq -rn --argjson data "${response}" '$data.hits.total.value')" + set -u + + if (( count > 0 )); then + 
break + fi + + was_retried=1 + echo -n 'x' >&2 + sleep 2 +done +if ((was_retried)); then + # flush stderr, important in non-interactive environments (CI) + echo >&2 +fi + +echo "$response" +# Elastic Agent buffers metrics until Elasticsearch becomes ready, so we +# tolerate multiple results +if (( count == 0 )); then + echo 'Expected at least 1 document' + exit 1 +fi + +# We expect to find log entries for the 'elasticsearch' Compose service using +# the following query: +# +# agent.name:"fleet-server" +# AND agent.type:"filebeat" +# AND container.name:"docker-elk-elasticsearch-1" +# +log 'Searching a container document generated by Fleet Server' + +response= +count=0 + +was_retried=0 + +# retry for max 60s (30*2s) +for _ in $(seq 1 30); do + response="$(curl 'http://elasticsearch:9200/logs-docker.container_logs-default/_search?q=agent.name:%22fleet-server%22%20AND%20agent.type:%22filebeat%22%20AND%20container.name:%22docker-elk-elasticsearch-1%22&size=1&pretty' -s --resolve "elasticsearch:9200:${ip_es}" -u elastic:testpasswd)" + + set +u # prevent "unbound variable" if assigned value is not an integer + count="$(jq -rn --argjson data "${response}" '$data.hits.total.value')" + set -u + + if (( count > 0 )); then + break + fi + + was_retried=1 + echo -n 'x' >&2 + sleep 2 +done +if ((was_retried)); then + # flush stderr, important in non-interactive environments (CI) + echo >&2 +fi + +echo "$response" +if (( count == 0 )); then + echo 'Expected at least 1 document' + exit 1 +fi diff --git a/.github/workflows/scripts/run-tests-heartbeat.sh b/.github/workflows/scripts/run-tests-heartbeat.sh new file mode 100755 index 0000000..d4ac20c --- /dev/null +++ b/.github/workflows/scripts/run-tests-heartbeat.sh @@ -0,0 +1,63 @@ +#!/usr/bin/env bash + +set -eu +set -o pipefail + + +source "${BASH_SOURCE[0]%/*}"/lib/testing.sh + + +cid_es="$(container_id elasticsearch)" +cid_hb="$(container_id heartbeat)" + +ip_es="$(service_ip elasticsearch)" +ip_hb="$(service_ip heartbeat)" + 
+grouplog 'Wait for readiness of Elasticsearch' +poll_ready "$cid_es" 'http://elasticsearch:9200/' --resolve "elasticsearch:9200:${ip_es}" -u 'elastic:testpasswd' +endgroup + +grouplog 'Wait for readiness of Heartbeat' +poll_ready "$cid_hb" 'http://heartbeat:5066/?pretty' --resolve "heartbeat:5066:${ip_hb}" +endgroup + +# We expect to find heartbeat entries for the 'elasticsearch' HTTP service +# using the following query: +# +# agent.type:"heartbeat" +# AND monitor.type:"http" +# AND url.domain:"elasticsearch" +# +log 'Searching a document generated by Heartbeat' + +declare response +declare -i count + +declare -i was_retried=0 + +# retry for max 60s (30*2s) +for _ in $(seq 1 30); do + response="$(curl 'http://elasticsearch:9200/heartbeat-*/_search?q=agent.type:%22heartbeat%22%20AND%20monitor.type:%22http%22%20AND%20url.domain:%22elasticsearch%22&size=1&pretty' -s --resolve "elasticsearch:9200:${ip_es}" -u elastic:testpasswd)" + + set +u # prevent "unbound variable" if assigned value is not an integer + count="$(jq -rn --argjson data "${response}" '$data.hits.total.value')" + set -u + + if (( count > 0 )); then + break + fi + + was_retried=1 + echo -n 'x' >&2 + sleep 2 +done +if ((was_retried)); then + # flush stderr, important in non-interactive environments (CI) + echo >&2 +fi + +echo "$response" +if (( count == 0 )); then + echo 'Expected at least 1 document' + exit 1 +fi diff --git a/.github/workflows/scripts/run-tests-metricbeat.sh b/.github/workflows/scripts/run-tests-metricbeat.sh new file mode 100755 index 0000000..5b998e3 --- /dev/null +++ b/.github/workflows/scripts/run-tests-metricbeat.sh @@ -0,0 +1,66 @@ +#!/usr/bin/env bash + +set -eu +set -o pipefail + + +source "${BASH_SOURCE[0]%/*}"/lib/testing.sh + + +cid_es="$(container_id elasticsearch)" +cid_mb="$(container_id metricbeat)" + +ip_es="$(service_ip elasticsearch)" +ip_mb="$(service_ip metricbeat)" + +grouplog 'Wait for readiness of Elasticsearch' +poll_ready "$cid_es" 'http://elasticsearch:9200/' 
--resolve "elasticsearch:9200:${ip_es}" -u 'elastic:testpasswd' +endgroup + +grouplog 'Wait for readiness of Metricbeat' +poll_ready "$cid_mb" 'http://metricbeat:5066/?pretty' --resolve "metricbeat:5066:${ip_mb}" +endgroup + +# We expect to find monitoring entries for the 'elasticsearch' Compose service +# using the following query: +# +# agent.type:"metricbeat" +# AND event.module:"docker" +# AND event.dataset:"docker.container" +# AND container.name:"docker-elk-elasticsearch-1" +# +log 'Searching a document generated by Metricbeat' + +declare response +declare -i count + +declare -i was_retried=0 + +# retry for max 60s (30*2s) +for _ in $(seq 1 30); do + response="$(curl 'http://elasticsearch:9200/metricbeat-*/_search?q=agent.type:%22metricbeat%22%20AND%20event.module:%22docker%22%20AND%20event.dataset:%22docker.container%22%20AND%20container.name:%22docker-elk-elasticsearch-1%22&size=1&pretty' -s --resolve "elasticsearch:9200:${ip_es}" -u elastic:testpasswd)" + + set +u # prevent "unbound variable" if assigned value is not an integer + count="$(jq -rn --argjson data "${response}" '$data.hits.total.value')" + set -u + + if (( count > 0 )); then + break + fi + + was_retried=1 + echo -n 'x' >&2 + sleep 2 +done +if ((was_retried)); then + # flush stderr, important in non-interactive environments (CI) + echo >&2 +fi + +echo "$response" +# Metricbeat buffers metrics until Elasticsearch becomes ready, so we tolerate +# multiple results +if (( count == 0 )); then + echo 'Expected at least 1 document' + exit 1 +fi diff --git a/.github/workflows/spam-issue-close.yml b/.github/workflows/spam-issue-close.yml new file mode 100644 index 0000000..4de11c1 --- /dev/null +++ b/.github/workflows/spam-issue-close.yml @@ -0,0 +1,57 @@ +name: Close issues without context + +permissions: + issues: write + +on: + issues: + types: [ labeled ] + +jobs: + + close-lock: + name: Close and lock issues + if: contains(github.event.issue.labels.*.name, 'bot:close') && github.event.issue.state 
== 'open' + runs-on: ubuntu-latest + + steps: + - name: Close + id: close + uses: actions/stale@v9.0.0 + with: + days-before-issue-stale: -1 + days-before-issue-close: 0 + stale-issue-label: bot:close + close-issue-label: insufficient information + close-issue-message: >- + This description omits all, or critical parts of the information requested by maintainers to be able to + reproduce the issue: + + + - the **complete** log history of your Elastic components, including `setup`. + - any change(s) performed to the docker-elk configuration. + - details about the runtime environment, for both Docker and Compose. + + + Therefore, this issue will now be **closed**. Please open a new issue and fill in the template. It saves + everyone's efforts, and allows maintainers to provide you with a solution in as few round trips as possible. + + Thank you for your understanding. :pray: + + # Due to eventual consistency, listing closed issues immediately after a + # close does not always yield the expected results. A sleep is a simple + # enough remediation to this issue. 
+ - name: Pause + if: fromJson(steps.close.outputs.closed-issues-prs)[0] + run: sleep 5 + + - name: Lock + uses: dessant/lock-threads@v5 + if: fromJson(steps.close.outputs.closed-issues-prs)[0] + with: + process-only: issues + issue-inactive-days: 0 + include-any-issue-labels: bot:close + remove-issue-labels: bot:close + issue-lock-reason: spam + log-output: true diff --git a/.github/workflows/update-merge.yml b/.github/workflows/update-merge.yml new file mode 100644 index 0000000..b7670b1 --- /dev/null +++ b/.github/workflows/update-merge.yml @@ -0,0 +1,42 @@ +name: Merge Elastic updates + +on: + workflow_run: + workflows: [ CI ] + types: + - completed + branches: + - update/main + - update/tls + - update/release-7.x + +jobs: + + merge: + name: Merge pull request + if: github.event.workflow_run.conclusion == 'success' + runs-on: ubuntu-latest + + steps: + - name: Impersonate update bot + uses: actions/create-github-app-token@v1 + id: generate-token + with: + app-id: ${{ secrets.APP_ID }} + private-key: ${{ secrets.APP_PRIVATE_KEY }} + + - name: Approve and merge + uses: ridedott/merge-me-action@v2 + with: + GITHUB_LOGIN: docker-elk-updater + GITHUB_TOKEN: ${{ steps.generate-token.outputs.token }} + + - name: Delete branch + uses: actions/github-script@v7 + with: + script: | + await github.request('DELETE /repos/{owner}/{repo}/git/refs/{ref}', { + owner: '${{ github.event.workflow_run.repository.owner.login }}', + repo: '${{ github.event.workflow_run.repository.name }}', + ref: 'heads/${{ github.event.workflow_run.head_branch }}' + }) diff --git a/.github/workflows/update.yml b/.github/workflows/update.yml new file mode 100644 index 0000000..09752da --- /dev/null +++ b/.github/workflows/update.yml @@ -0,0 +1,103 @@ +name: Update Elastic release + +on: + schedule: + - cron: '0 0 * * 0' # At 00:00 every Sunday + +jobs: + + check-and-update: + name: Check and update Elastic release + runs-on: ubuntu-latest + strategy: + matrix: + include: + - release: 8.x + branch: 
main + - release: 8.x + branch: tls + - release: 7.x + branch: release-7.x + + steps: + - uses: actions/setup-node@v4 + - run: npm install semver + + - name: Get latest release version + uses: actions/github-script@v7 + id: get-latest-release + with: + script: | + const semver = require('semver') + + const latestVersion = await github. + paginate(github.rest.repos.listReleases, { + owner: 'elastic', + repo: 'elasticsearch' + }) + .then(releases => { + for (const release of releases) { + // Results are returned sorted by created_at, so it is safe to assume + // that the first encountered match is also the series' latest release. + + const version=semver.clean(release.tag_name) + + if (semver.satisfies(version, '${{ matrix.release }}')) { + return version + } + } + }); + + if (latestVersion) { + // Return an object so that the result can be handled as structured data + // instead of a quoted string in subsequent steps. + return { version: latestVersion } + } + + - uses: actions/checkout@v4 + if: steps.get-latest-release.outputs.result + with: + ref: ${{ matrix.branch }} + + - name: Update stack version + id: update-files + if: steps.get-latest-release.outputs.result + run: | + source .env + cur_ver="$ELASTIC_VERSION" + new_ver=${{ fromJson(steps.get-latest-release.outputs.result).version }} + + # Escape period characters so sed interprets them literally + cur_ver="${cur_ver//./\\.}" + + declare -a upd_files=( .env README.md */Dockerfile extensions/*/Dockerfile ) + if [ -f tls/README.md ]; then + upd_files+=( tls/README.md ) + fi + + sed -i "s/${cur_ver}/${new_ver}/g" "${upd_files[@]}" + + git_status="$(git status --porcelain)" + if [[ ${git_status} ]]; then + echo -e 'Changes to be committed:\n' + echo "${git_status}" + echo 'has-changes=true' >>"$GITHUB_OUTPUT" + fi + + - name: Impersonate update bot + uses: actions/create-github-app-token@v1 + id: generate-token + if: steps.update-files.outputs.has-changes + with: + app-id: ${{ secrets.APP_ID }} + private-key: ${{ 
secrets.APP_PRIVATE_KEY }} + + - name: Send pull request to update to new version + if: steps.update-files.outputs.has-changes + uses: peter-evans/create-pull-request@v7 + with: + token: ${{ steps.generate-token.outputs.token }} + branch: update/${{ matrix.branch }} + commit-message: Update to v${{ fromJson(steps.get-latest-release.outputs.result).version }} + title: Update to v${{ fromJson(steps.get-latest-release.outputs.result).version }} + delete-branch: true diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..0dbd69f --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Anthony Lapenna + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/README.md b/README.md new file mode 100644 index 0000000..ee046c2 --- /dev/null +++ b/README.md @@ -0,0 +1,498 @@ +# Elastic stack (ELK) on Docker + +[![Elastic Stack version](https://img.shields.io/badge/Elastic%20Stack-8.15.1-00bfb3?style=flat&logo=elastic-stack)](https://www.elastic.co/blog/category/releases) +[![Build Status](https://github.com/deviantony/docker-elk/workflows/CI/badge.svg?branch=main)](https://github.com/deviantony/docker-elk/actions?query=workflow%3ACI+branch%3Amain) +[![Join the chat](https://badges.gitter.im/Join%20Chat.svg)](https://app.gitter.im/#/room/#deviantony_docker-elk:gitter.im) + +Run the latest version of the [Elastic stack][elk-stack] with Docker and Docker Compose. + +It gives you the ability to analyze any data set by using the searching/aggregation capabilities of Elasticsearch and +the visualization power of Kibana. + +Based on the [official Docker images][elastic-docker] from Elastic: + +* [Elasticsearch](https://github.com/elastic/elasticsearch/tree/main/distribution/docker) +* [Logstash](https://github.com/elastic/logstash/tree/main/docker) +* [Kibana](https://github.com/elastic/kibana/tree/main/src/dev/build/tasks/os_packages/docker_generator) + +Other available stack variants: + +* [`tls`](https://github.com/deviantony/docker-elk/tree/tls): TLS encryption enabled in Elasticsearch, Kibana (opt in), + and Fleet +* [`searchguard`](https://github.com/deviantony/docker-elk/tree/searchguard): Search Guard support + +> [!IMPORTANT] +> [Platinum][subscriptions] features are enabled by default for a [trial][license-mngmt] duration of **30 days**. After +> this evaluation period, you will retain access to all the free features included in the Open Basic license seamlessly, +> without manual intervention required, and without losing any data. Refer to the [How to disable paid +> features](#how-to-disable-paid-features) section to opt out of this behaviour. 
+ +--- + +## tl;dr + +```sh +docker compose up setup +``` + +```sh +docker compose up +``` + +![Animated demo](https://user-images.githubusercontent.com/3299086/155972072-0c89d6db-707a-47a1-818b-5f976565f95a.gif) + +--- + +## Philosophy + +We aim at providing the simplest possible entry into the Elastic stack for anybody who feels like experimenting with +this powerful combo of technologies. This project's default configuration is purposely minimal and unopinionated. It +does not rely on any external dependency, and uses as little custom automation as necessary to get things up and +running. + +Instead, we believe in good documentation so that you can use this repository as a template, tweak it, and make it _your +own_. [sherifabdlnaby/elastdocker][elastdocker] is one example among others of project that builds upon this idea. + +--- + +## Contents + +1. [Requirements](#requirements) + * [Host setup](#host-setup) + * [Docker Desktop](#docker-desktop) + * [Windows](#windows) + * [macOS](#macos) +1. [Usage](#usage) + * [Bringing up the stack](#bringing-up-the-stack) + * [Initial setup](#initial-setup) + * [Setting up user authentication](#setting-up-user-authentication) + * [Injecting data](#injecting-data) + * [Cleanup](#cleanup) + * [Version selection](#version-selection) +1. [Configuration](#configuration) + * [How to configure Elasticsearch](#how-to-configure-elasticsearch) + * [How to configure Kibana](#how-to-configure-kibana) + * [How to configure Logstash](#how-to-configure-logstash) + * [How to disable paid features](#how-to-disable-paid-features) + * [How to scale out the Elasticsearch cluster](#how-to-scale-out-the-elasticsearch-cluster) + * [How to re-execute the setup](#how-to-re-execute-the-setup) + * [How to reset a password programmatically](#how-to-reset-a-password-programmatically) +1. [Extensibility](#extensibility) + * [How to add plugins](#how-to-add-plugins) + * [How to enable the provided extensions](#how-to-enable-the-provided-extensions) +1. 
[JVM tuning](#jvm-tuning) + * [How to specify the amount of memory used by a service](#how-to-specify-the-amount-of-memory-used-by-a-service) + * [How to enable a remote JMX connection to a service](#how-to-enable-a-remote-jmx-connection-to-a-service) +1. [Going further](#going-further) + * [Plugins and integrations](#plugins-and-integrations) + +## Requirements + +### Host setup + +* [Docker Engine][docker-install] version **18.06.0** or newer +* [Docker Compose][compose-install] version **2.0.0** or newer +* 1.5 GB of RAM + +> [!NOTE] +> Especially on Linux, make sure your user has the [required permissions][linux-postinstall] to interact with the Docker +> daemon. + +By default, the stack exposes the following ports: + +* 5044: Logstash Beats input +* 50000: Logstash TCP input +* 9600: Logstash monitoring API +* 9200: Elasticsearch HTTP +* 9300: Elasticsearch TCP transport +* 5601: Kibana + +> [!WARNING] +> Elasticsearch's [bootstrap checks][bootstrap-checks] were purposely disabled to facilitate the setup of the Elastic +> stack in development environments. For production setups, we recommend users to set up their host according to the +> instructions from the Elasticsearch documentation: [Important System Configuration][es-sys-config]. + +### Docker Desktop + +#### Windows + +If you are using the legacy Hyper-V mode of _Docker Desktop for Windows_, ensure [File Sharing][win-filesharing] is +enabled for the `C:` drive. + +#### macOS + +The default configuration of _Docker Desktop for Mac_ allows mounting files from `/Users/`, `/Volume/`, `/private/`, +`/tmp` and `/var/folders` exclusively. Make sure the repository is cloned in one of those locations or follow the +instructions from the [documentation][mac-filesharing] to add more locations. + +## Usage + +> [!WARNING] +> You must rebuild the stack images with `docker compose build` whenever you switch branch or update the +> [version](#version-selection) of an already existing stack. 
+ +### Bringing up the stack + +Clone this repository onto the Docker host that will run the stack with the command below: + +```sh +git clone https://github.com/deviantony/docker-elk.git +``` + +Then, initialize the Elasticsearch users and groups required by docker-elk by executing the command: + +```sh +docker compose up setup +``` + +If everything went well and the setup completed without error, start the other stack components: + +```sh +docker compose up +``` + +> [!NOTE] +> You can also run all services in the background (detached mode) by appending the `-d` flag to the above command. + +Give Kibana about a minute to initialize, then access the Kibana web UI by opening <http://localhost:5601> in a web +browser and use the following (default) credentials to log in: + +* user: *elastic* +* password: *changeme* + +> [!NOTE] +> Upon the initial startup, the `elastic`, `logstash_internal` and `kibana_system` Elasticsearch users are initialized +> with the values of the passwords defined in the [`.env`](.env) file (_"changeme"_ by default). The first one is the +> [built-in superuser][builtin-users], the other two are used by Kibana and Logstash respectively to communicate with +> Elasticsearch. This task is only performed during the _initial_ startup of the stack. To change users' passwords +> _after_ they have been initialized, please refer to the instructions in the next section. + +### Initial setup + +#### Setting up user authentication + +> [!NOTE] +> Refer to [Security settings in Elasticsearch][es-security] to disable authentication. + +> [!WARNING] +> Starting with Elastic v8.0.0, it is no longer possible to run Kibana using the bootstrapped privileged `elastic` user. + +The _"changeme"_ password set by default for all aforementioned users is **insecure**. For increased security, we will +reset the passwords of all aforementioned Elasticsearch users to random secrets. + +1.
Reset passwords for default users + + The commands below reset the passwords of the `elastic`, `logstash_internal` and `kibana_system` users. Take note + of them. + + ```sh + docker compose exec elasticsearch bin/elasticsearch-reset-password --batch --user elastic + ``` + + ```sh + docker compose exec elasticsearch bin/elasticsearch-reset-password --batch --user logstash_internal + ``` + + ```sh + docker compose exec elasticsearch bin/elasticsearch-reset-password --batch --user kibana_system + ``` + + If the need for it arises (e.g. if you want to [collect monitoring information][ls-monitoring] through Beats and + other components), feel free to repeat this operation at any time for the rest of the [built-in + users][builtin-users]. + +1. Replace usernames and passwords in configuration files + + Replace the password of the `elastic` user inside the `.env` file with the password generated in the previous step. + Its value isn't used by any core component, but [extensions](#how-to-enable-the-provided-extensions) use it to + connect to Elasticsearch. + + > [!NOTE] + > In case you don't plan on using any of the provided [extensions](#how-to-enable-the-provided-extensions), or + > prefer to create your own roles and users to authenticate these services, it is safe to remove the + > `ELASTIC_PASSWORD` entry from the `.env` file altogether after the stack has been initialized. + + Replace the password of the `logstash_internal` user inside the `.env` file with the password generated in the + previous step. Its value is referenced inside the Logstash pipeline file (`logstash/pipeline/logstash.conf`). + + Replace the password of the `kibana_system` user inside the `.env` file with the password generated in the previous + step. Its value is referenced inside the Kibana configuration file (`kibana/config/kibana.yml`). + + See the [Configuration](#configuration) section below for more information about these configuration files. + +1. 
Restart Logstash and Kibana to re-connect to Elasticsearch using the new passwords + + ```sh + docker compose up -d logstash kibana + ``` + +> [!NOTE] +> Learn more about the security of the Elastic stack at [Secure the Elastic Stack][sec-cluster]. + +#### Injecting data + +Launch the Kibana web UI by opening <http://localhost:5601> in a web browser, and use the following credentials to log +in: + +* user: *elastic* +* password: *<your generated elastic password>* + +Now that the stack is fully configured, you can go ahead and inject some log entries. + +The shipped Logstash configuration allows you to send data over the TCP port 50000. For example, you can use one of the +following commands — depending on your installed version of `nc` (Netcat) — to ingest the content of the log file +`/path/to/logfile.log` in Elasticsearch, via Logstash: + +```sh +# Execute `nc -h` to determine your `nc` version + +cat /path/to/logfile.log | nc -q0 localhost 50000 # BSD +cat /path/to/logfile.log | nc -c localhost 50000 # GNU +cat /path/to/logfile.log | nc --send-only localhost 50000 # nmap +``` + +You can also load the sample data provided by your Kibana installation. + +### Cleanup + +Elasticsearch data is persisted inside a volume by default. + +In order to entirely shutdown the stack and remove all persisted data, use the following Docker Compose command: + +```sh +docker compose down -v +``` + +### Version selection + +This repository stays aligned with the latest version of the Elastic stack. The `main` branch tracks the current major +version (8.x). + +To use a different version of the core Elastic components, simply change the version number inside the [`.env`](.env) +file. If you are upgrading an existing stack, remember to rebuild all container images using the `docker compose build` +command. + +> [!IMPORTANT] +> Always pay attention to the [official upgrade instructions][upgrade] for each individual component before performing a +> stack upgrade.
+ +Older major versions are also supported on separate branches: + +* [`release-7.x`](https://github.com/deviantony/docker-elk/tree/release-7.x): 7.x series +* [`release-6.x`](https://github.com/deviantony/docker-elk/tree/release-6.x): 6.x series (End-of-life) +* [`release-5.x`](https://github.com/deviantony/docker-elk/tree/release-5.x): 5.x series (End-of-life) + +## Configuration + +> [!IMPORTANT] +> Configuration is not dynamically reloaded, you will need to restart individual components after any configuration +> change. + +### How to configure Elasticsearch + +The Elasticsearch configuration is stored in [`elasticsearch/config/elasticsearch.yml`][config-es]. + +You can also specify the options you want to override by setting environment variables inside the Compose file: + +```yml +elasticsearch: + + environment: + network.host: _non_loopback_ + cluster.name: my-cluster +``` + +Please refer to the following documentation page for more details about how to configure Elasticsearch inside Docker +containers: [Install Elasticsearch with Docker][es-docker]. + +### How to configure Kibana + +The Kibana default configuration is stored in [`kibana/config/kibana.yml`][config-kbn]. + +You can also specify the options you want to override by setting environment variables inside the Compose file: + +```yml +kibana: + + environment: + SERVER_NAME: kibana.example.org +``` + +Please refer to the following documentation page for more details about how to configure Kibana inside Docker +containers: [Install Kibana with Docker][kbn-docker]. + +### How to configure Logstash + +The Logstash configuration is stored in [`logstash/config/logstash.yml`][config-ls]. 
+ +You can also specify the options you want to override by setting environment variables inside the Compose file: + +```yml +logstash: + + environment: + LOG_LEVEL: debug +``` + +Please refer to the following documentation page for more details about how to configure Logstash inside Docker +containers: [Configuring Logstash for Docker][ls-docker]. + +### How to disable paid features + +You can cancel an ongoing trial before its expiry date — and thus revert to a basic license — either from the [License +Management][license-mngmt] panel of Kibana, or using Elasticsearch's `start_basic` [Licensing API][license-apis]. Please +note that the second option is the only way to recover access to Kibana if the license isn't either switched to `basic` +or upgraded before the trial's expiry date. + +Changing the license type by switching the value of Elasticsearch's `xpack.license.self_generated.type` setting from +`trial` to `basic` (see [License settings][license-settings]) will only work **if done prior to the initial setup.** +After a trial has been started, the loss of features from `trial` to `basic` _must_ be acknowledged using one of the two +methods described in the first paragraph. + +### How to scale out the Elasticsearch cluster + +Follow the instructions from the Wiki: [Scaling out Elasticsearch](https://github.com/deviantony/docker-elk/wiki/Elasticsearch-cluster) + +### How to re-execute the setup + +To run the setup container again and re-initialize all users for which a password was defined inside the `.env` file, +simply "up" the `setup` Compose service again: + +```console +$ docker compose up setup + ⠿ Container docker-elk-elasticsearch-1 Running + ⠿ Container docker-elk-setup-1 Created +Attaching to docker-elk-setup-1 +... 
+docker-elk-setup-1 | [+] User 'monitoring_internal' +docker-elk-setup-1 | ⠿ User does not exist, creating +docker-elk-setup-1 | [+] User 'beats_system' +docker-elk-setup-1 | ⠿ User exists, setting password +docker-elk-setup-1 exited with code 0 +``` + +### How to reset a password programmatically + +If for any reason you are unable to use Kibana to change the password of your users (including [built-in +users][builtin-users]), you can use the Elasticsearch API instead and achieve the same result. + +In the example below, we reset the password of the `elastic` user (notice "/user/elastic" in the URL): + +```sh +curl -XPOST -D- 'http://localhost:9200/_security/user/elastic/_password' \ + -H 'Content-Type: application/json' \ + -u elastic:<your current elastic password> \ + -d '{"password" : "<your new password>"}' +``` + +## Extensibility + +### How to add plugins + +To add plugins to any ELK component you have to: + +1. Add a `RUN` statement to the corresponding `Dockerfile` (eg. `RUN logstash-plugin install logstash-filter-json`) +1. Add the associated plugin code configuration to the service configuration (eg. Logstash input/output) +1. Rebuild the images using the `docker compose build` command + +### How to enable the provided extensions + +A few extensions are available inside the [`extensions`](extensions) directory. These extensions provide features which +are not part of the standard Elastic stack, but can be used to enrich it with extra integrations. + +The documentation for these extensions is provided inside each individual subdirectory, on a per-extension basis. Some +of them require manual changes to the default ELK configuration.
+ +## JVM tuning + +### How to specify the amount of memory used by a service + +The startup scripts for Elasticsearch and Logstash can append extra JVM options from the value of an environment +variable, allowing the user to adjust the amount of memory that can be used by each component: + +| Service | Environment variable | +|---------------|----------------------| +| Elasticsearch | ES_JAVA_OPTS | +| Logstash | LS_JAVA_OPTS | + +To accommodate environments where memory is scarce (Docker Desktop for Mac has only 2 GB available by default), the Heap +Size allocation is capped by default in the `docker-compose.yml` file to 512 MB for Elasticsearch and 256 MB for +Logstash. If you want to override the default JVM configuration, edit the matching environment variable(s) in the +`docker-compose.yml` file. + +For example, to increase the maximum JVM Heap Size for Logstash: + +```yml +logstash: + + environment: + LS_JAVA_OPTS: -Xms1g -Xmx1g +``` + +When these options are not set: + +* Elasticsearch starts with a JVM Heap Size that is [determined automatically][es-heap]. +* Logstash starts with a fixed JVM Heap Size of 1 GB. + +### How to enable a remote JMX connection to a service + +As for the Java Heap memory (see above), you can specify JVM options to enable JMX and map the JMX port on the Docker +host. + +Update the `{ES,LS}_JAVA_OPTS` environment variable with the following content (I've mapped the JMX service on the port +18080, you can change that). 
Do not forget to update the `-Djava.rmi.server.hostname` option with the IP address of your +Docker host (replace **DOCKER_HOST_IP**): + +```yml +logstash: + + environment: + LS_JAVA_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.port=18080 -Dcom.sun.management.jmxremote.rmi.port=18080 -Djava.rmi.server.hostname=DOCKER_HOST_IP -Dcom.sun.management.jmxremote.local.only=false +``` + +## Going further + +### Plugins and integrations + +See the following Wiki pages: + +* [External applications](https://github.com/deviantony/docker-elk/wiki/External-applications) +* [Popular integrations](https://github.com/deviantony/docker-elk/wiki/Popular-integrations) + +[elk-stack]: https://www.elastic.co/what-is/elk-stack +[elastic-docker]: https://www.docker.elastic.co/ +[subscriptions]: https://www.elastic.co/subscriptions +[es-security]: https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html +[license-settings]: https://www.elastic.co/guide/en/elasticsearch/reference/current/license-settings.html +[license-mngmt]: https://www.elastic.co/guide/en/kibana/current/managing-licenses.html +[license-apis]: https://www.elastic.co/guide/en/elasticsearch/reference/current/licensing-apis.html + +[elastdocker]: https://github.com/sherifabdlnaby/elastdocker + +[docker-install]: https://docs.docker.com/get-docker/ +[compose-install]: https://docs.docker.com/compose/install/ +[linux-postinstall]: https://docs.docker.com/engine/install/linux-postinstall/ + +[bootstrap-checks]: https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks.html +[es-sys-config]: https://www.elastic.co/guide/en/elasticsearch/reference/current/system-config.html +[es-heap]: https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#heap-size-settings + +[win-filesharing]: 
https://docs.docker.com/desktop/settings/windows/#file-sharing +[mac-filesharing]: https://docs.docker.com/desktop/settings/mac/#file-sharing + +[builtin-users]: https://www.elastic.co/guide/en/elasticsearch/reference/current/built-in-users.html +[ls-monitoring]: https://www.elastic.co/guide/en/logstash/current/monitoring-with-metricbeat.html +[sec-cluster]: https://www.elastic.co/guide/en/elasticsearch/reference/current/secure-cluster.html + +[connect-kibana]: https://www.elastic.co/guide/en/kibana/current/connect-to-elasticsearch.html +[index-pattern]: https://www.elastic.co/guide/en/kibana/current/index-patterns.html + +[config-es]: ./elasticsearch/config/elasticsearch.yml +[config-kbn]: ./kibana/config/kibana.yml +[config-ls]: ./logstash/config/logstash.yml + +[es-docker]: https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html +[kbn-docker]: https://www.elastic.co/guide/en/kibana/current/docker.html +[ls-docker]: https://www.elastic.co/guide/en/logstash/current/docker-config.html + +[upgrade]: https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-upgrade.html diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..e7c07b9 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,112 @@ +services: + + # The 'setup' service runs a one-off script which initializes users inside + # Elasticsearch — such as 'logstash_internal' and 'kibana_system' — with the + # values of the passwords defined in the '.env' file. It also creates the + # roles required by some of these users. + # + # This task only needs to be performed once, during the *initial* startup of + # the stack. Any subsequent run will reset the passwords of existing users to + # the values defined inside the '.env' file, and the built-in roles to their + # default permissions. + # + # By default, it is excluded from the services started by 'docker compose up' + # due to the non-default profile it belongs to. 
To run it, either provide the + # '--profile=setup' CLI flag to Compose commands, or "up" the service by name + # such as 'docker compose up setup'. + setup: + profiles: + - setup + build: + context: setup/ + args: + ELASTIC_VERSION: ${ELASTIC_VERSION} + init: true + volumes: + - ./setup/entrypoint.sh:/entrypoint.sh:ro,Z + - ./setup/lib.sh:/lib.sh:ro,Z + - ./setup/roles:/roles:ro,Z + environment: + ELASTIC_PASSWORD: ${ELASTIC_PASSWORD:-} + LOGSTASH_INTERNAL_PASSWORD: ${LOGSTASH_INTERNAL_PASSWORD:-} + KIBANA_SYSTEM_PASSWORD: ${KIBANA_SYSTEM_PASSWORD:-} + METRICBEAT_INTERNAL_PASSWORD: ${METRICBEAT_INTERNAL_PASSWORD:-} + FILEBEAT_INTERNAL_PASSWORD: ${FILEBEAT_INTERNAL_PASSWORD:-} + HEARTBEAT_INTERNAL_PASSWORD: ${HEARTBEAT_INTERNAL_PASSWORD:-} + MONITORING_INTERNAL_PASSWORD: ${MONITORING_INTERNAL_PASSWORD:-} + BEATS_SYSTEM_PASSWORD: ${BEATS_SYSTEM_PASSWORD:-} + networks: + - elk + depends_on: + - elasticsearch + + elasticsearch: + build: + context: elasticsearch/ + args: + ELASTIC_VERSION: ${ELASTIC_VERSION} + volumes: + - ./elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro,Z + - elasticsearch:/usr/share/elasticsearch/data:Z + ports: + - 9200:9200 + - 9300:9300 + environment: + node.name: elasticsearch + ES_JAVA_OPTS: -Xms512m -Xmx512m + # Bootstrap password. + # Used to initialize the keystore during the initial startup of + # Elasticsearch. Ignored on subsequent runs. + ELASTIC_PASSWORD: ${ELASTIC_PASSWORD:-} + # Use single node discovery in order to disable production mode and avoid bootstrap checks. 
+ # see: https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks.html + discovery.type: single-node + networks: + - elk + restart: unless-stopped + + logstash: + build: + context: logstash/ + args: + ELASTIC_VERSION: ${ELASTIC_VERSION} + volumes: + - ./logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml:ro,Z + - ./logstash/pipeline:/usr/share/logstash/pipeline:ro,Z + ports: + - 5044:5044 + - 50000:50000/tcp + - 50000:50000/udp + - 9600:9600 + environment: + LS_JAVA_OPTS: -Xms256m -Xmx256m + LOGSTASH_INTERNAL_PASSWORD: ${LOGSTASH_INTERNAL_PASSWORD:-} + networks: + - elk + depends_on: + - elasticsearch + restart: unless-stopped + + kibana: + build: + context: kibana/ + args: + ELASTIC_VERSION: ${ELASTIC_VERSION} + volumes: + - ./kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml:ro,Z + ports: + - 5601:5601 + environment: + KIBANA_SYSTEM_PASSWORD: ${KIBANA_SYSTEM_PASSWORD:-} + networks: + - elk + depends_on: + - elasticsearch + restart: unless-stopped + +networks: + elk: + driver: bridge + +volumes: + elasticsearch: diff --git a/elasticsearch/.dockerignore b/elasticsearch/.dockerignore new file mode 100644 index 0000000..37eef9d --- /dev/null +++ b/elasticsearch/.dockerignore @@ -0,0 +1,6 @@ +# Ignore Docker build files +Dockerfile +.dockerignore + +# Ignore OS artifacts +**/.DS_Store diff --git a/elasticsearch/Dockerfile b/elasticsearch/Dockerfile new file mode 100644 index 0000000..39e4ab3 --- /dev/null +++ b/elasticsearch/Dockerfile @@ -0,0 +1,7 @@ +ARG ELASTIC_VERSION + +# https://www.docker.elastic.co/ +FROM docker.elastic.co/elasticsearch/elasticsearch:${ELASTIC_VERSION:-8.15.1} + +# Add your elasticsearch plugins setup here +# Example: RUN elasticsearch-plugin install analysis-icu diff --git a/elasticsearch/config/elasticsearch.yml b/elasticsearch/config/elasticsearch.yml new file mode 100644 index 0000000..d66f071 --- /dev/null +++ b/elasticsearch/config/elasticsearch.yml @@ -0,0 +1,12 @@ +--- +## Default 
Elasticsearch configuration from Elasticsearch base image. +## https://github.com/elastic/elasticsearch/blob/main/distribution/docker/src/docker/config/elasticsearch.yml +# +cluster.name: docker-cluster +network.host: 0.0.0.0 + +## X-Pack settings +## see https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html +# +xpack.license.self_generated.type: trial +xpack.security.enabled: true diff --git a/extensions/README.md b/extensions/README.md new file mode 100644 index 0000000..50016fb --- /dev/null +++ b/extensions/README.md @@ -0,0 +1,3 @@ +# Extensions + +Third-party extensions that enable extra integrations with the Elastic stack. diff --git a/extensions/curator/.dockerignore b/extensions/curator/.dockerignore new file mode 100644 index 0000000..37eef9d --- /dev/null +++ b/extensions/curator/.dockerignore @@ -0,0 +1,6 @@ +# Ignore Docker build files +Dockerfile +.dockerignore + +# Ignore OS artifacts +**/.DS_Store diff --git a/extensions/curator/Dockerfile b/extensions/curator/Dockerfile new file mode 100644 index 0000000..1836c7f --- /dev/null +++ b/extensions/curator/Dockerfile @@ -0,0 +1,9 @@ +FROM untergeek/curator:8.0.10 + +USER root + +RUN >>/var/spool/cron/crontabs/nobody \ + echo '* * * * * /curator/curator /.curator/delete_log_files_curator.yml' + +ENTRYPOINT ["crond"] +CMD ["-f", "-d8"] diff --git a/extensions/curator/README.md b/extensions/curator/README.md new file mode 100644 index 0000000..e19ebe7 --- /dev/null +++ b/extensions/curator/README.md @@ -0,0 +1,20 @@ +# Curator + +Elasticsearch Curator helps you curate or manage your indices. + +## Usage + +If you want to include the Curator extension, run Docker Compose from the root of the repository with an additional +command line argument referencing the `curator-compose.yml` file: + +```bash +$ docker compose -f docker-compose.yml -f extensions/curator/curator-compose.yml up +``` + +This sample setup demonstrates how to run `curator` every minute using `cron`. 
+ +All configuration files are available in the `config/` directory. + +## Documentation + +[Curator Reference](https://www.elastic.co/guide/en/elasticsearch/client/curator/current/index.html) diff --git a/extensions/curator/config/curator.yml b/extensions/curator/config/curator.yml new file mode 100644 index 0000000..6777edc --- /dev/null +++ b/extensions/curator/config/curator.yml @@ -0,0 +1,13 @@ +# Curator configuration +# https://www.elastic.co/guide/en/elasticsearch/client/curator/current/configfile.html + +elasticsearch: + client: + hosts: [ http://elasticsearch:9200 ] + other_settings: + username: elastic + password: ${ELASTIC_PASSWORD} + +logging: + loglevel: INFO + logformat: default diff --git a/extensions/curator/config/delete_log_files_curator.yml b/extensions/curator/config/delete_log_files_curator.yml new file mode 100644 index 0000000..779c67a --- /dev/null +++ b/extensions/curator/config/delete_log_files_curator.yml @@ -0,0 +1,21 @@ +actions: + 1: + action: delete_indices + description: >- + Delete indices. Find which to delete by first limiting the list to + logstash- prefixed indices. Then further filter those to prevent deletion + of anything less than the number of days specified by unit_count. + Ignore the error if the filter does not result in an actionable list of + indices (ignore_empty_list) and exit cleanly. 
+ options: + ignore_empty_list: True + disable_action: False + filters: + - filtertype: pattern + kind: prefix + value: logstash- + - filtertype: age + source: creation_date + direction: older + unit: days + unit_count: 2 diff --git a/extensions/curator/curator-compose.yml b/extensions/curator/curator-compose.yml new file mode 100644 index 0000000..78734c5 --- /dev/null +++ b/extensions/curator/curator-compose.yml @@ -0,0 +1,14 @@ +services: + curator: + build: + context: extensions/curator/ + init: true + volumes: + - ./extensions/curator/config/curator.yml:/.curator/curator.yml:ro,Z + - ./extensions/curator/config/delete_log_files_curator.yml:/.curator/delete_log_files_curator.yml:ro,Z + environment: + ELASTIC_PASSWORD: ${ELASTIC_PASSWORD:-} + networks: + - elk + depends_on: + - elasticsearch diff --git a/extensions/enterprise-search/.dockerignore b/extensions/enterprise-search/.dockerignore new file mode 100644 index 0000000..37eef9d --- /dev/null +++ b/extensions/enterprise-search/.dockerignore @@ -0,0 +1,6 @@ +# Ignore Docker build files +Dockerfile +.dockerignore + +# Ignore OS artifacts +**/.DS_Store diff --git a/extensions/enterprise-search/Dockerfile b/extensions/enterprise-search/Dockerfile new file mode 100644 index 0000000..a68c519 --- /dev/null +++ b/extensions/enterprise-search/Dockerfile @@ -0,0 +1,4 @@ +ARG ELASTIC_VERSION + +# https://www.docker.elastic.co/ +FROM docker.elastic.co/enterprise-search/enterprise-search:${ELASTIC_VERSION:-8.15.1} diff --git a/extensions/enterprise-search/README.md b/extensions/enterprise-search/README.md new file mode 100644 index 0000000..e8ac573 --- /dev/null +++ b/extensions/enterprise-search/README.md @@ -0,0 +1,144 @@ +# Enterprise Search extension + +Elastic Enterprise Search is a suite of products for search applications backed by the Elastic Stack. + +## Requirements + +* 2 GB of free RAM, on top of the resources required by the other stack components and extensions. 
+ +The Enterprise Search web application is served on the TCP port `3002`. + +## Usage + +### Generate an encryption key + +Enterprise Search requires one or more [encryption keys][enterprisesearch-encryption] to be configured before the +initial startup. Failing to do so prevents the server from starting. + +Encryption keys can contain any series of characters. Elastic recommends using 256-bit keys for optimal security. + +Those encryption keys must be added manually to the [`config/enterprise-search.yml`][config-enterprisesearch] file. By +default, the list of encryption keys is empty and must be populated using one of the following formats: + +```yaml +secret_management.encryption_keys: + - my_first_encryption_key + - my_second_encryption_key + - ... +``` + +```yaml +secret_management.encryption_keys: [my_first_encryption_key, my_second_encryption_key, ...] +``` + +> [!NOTE] +> To generate a strong random encryption key, you can use the OpenSSL utility or any other online/offline tool of your +> choice: +> +> ```console +> $ openssl rand -hex 32 +> 680f94e568c90364bedf927b2f0f49609702d3eab9098688585a375b14274546 +> ``` + +### Enable Elasticsearch's API key service + +Enterprise Search requires Elasticsearch's built-in [API key service][es-security] to be enabled in order to start. +Unless Elasticsearch is configured to enable TLS on the HTTP interface (disabled by default), this service is disabled +by default. + +To enable it, modify the Elasticsearch configuration file in [`elasticsearch/config/elasticsearch.yml`][config-es] and +add the following setting: + +```yaml +xpack.security.authc.api_key.enabled: true +``` + +### Configure the Enterprise Search host in Kibana + +Kibana acts as the [management interface][enterprisesearch-kb] to Enterprise Search. 
+ +To enable the management experience for Enterprise Search, modify the Kibana configuration file in +[`kibana/config/kibana.yml`][config-kbn] and add the following setting: + +```yaml +enterpriseSearch.host: http://enterprise-search:3002 +``` + +### Start the server + +To include Enterprise Search in the stack, run Docker Compose from the root of the repository with an additional command +line argument referencing the `enterprise-search-compose.yml` file: + +```console +$ docker compose -f docker-compose.yml -f extensions/enterprise-search/enterprise-search-compose.yml up +``` + +Allow a few minutes for the stack to start, then open your web browser at the address <http://localhost:3002> to see the +Enterprise Search home page. + +Enterprise Search is configured on first boot with the following default credentials: + +* user: *enterprise_search* +* password: *changeme* + +## Security + +The Enterprise Search password is defined inside the Compose file via the `ENT_SEARCH_DEFAULT_PASSWORD` environment +variable. We highly recommend choosing a more secure password than the default one for security reasons. + +To do so, change the value of the `ENT_SEARCH_DEFAULT_PASSWORD` environment variable inside the Compose file **before the first +boot**: + +```yaml +enterprise-search: + + environment: + ENT_SEARCH_DEFAULT_PASSWORD: {{some strong password}} +``` + +> [!WARNING] +> The default Enterprise Search password can only be set during the initial boot. Once the password is persisted in +> Elasticsearch, it can only be changed via the Elasticsearch API. + +For more information, please refer to [User Management and Security][enterprisesearch-security]. + +## Configuring Enterprise Search + +The Enterprise Search configuration is stored in [`config/enterprise-search.yml`][config-enterprisesearch]. You can +modify this file using the [Default Enterprise Search configuration][enterprisesearch-config] as a reference. 
+ +You can also specify the options you want to override by setting environment variables inside the Compose file: + +```yaml +enterprise-search: + + environment: + ent_search.auth.source: standard + worker.threads: '6' +``` + +Any change to the Enterprise Search configuration requires a restart of the Enterprise Search container: + +```console +$ docker compose -f docker-compose.yml -f extensions/enterprise-search/enterprise-search-compose.yml restart enterprise-search +``` + +Please refer to the following documentation page for more details about how to configure Enterprise Search inside a +Docker container: [Running Enterprise Search Using Docker][enterprisesearch-docker]. + +## See also + +[Enterprise Search documentation][enterprisesearch-docs] + +[config-enterprisesearch]: ./config/enterprise-search.yml + +[enterprisesearch-encryption]: https://www.elastic.co/guide/en/enterprise-search/current/encryption-keys.html +[enterprisesearch-security]: https://www.elastic.co/guide/en/workplace-search/current/workplace-search-security.html +[enterprisesearch-config]: https://www.elastic.co/guide/en/enterprise-search/current/configuration.html +[enterprisesearch-docker]: https://www.elastic.co/guide/en/enterprise-search/current/docker.html +[enterprisesearch-docs]: https://www.elastic.co/guide/en/enterprise-search/current/index.html +[enterprisesearch-kb]: https://www.elastic.co/guide/en/kibana/current/enterprise-search-settings-kb.html + +[es-security]: https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html#api-key-service-settings +[config-es]: ../../elasticsearch/config/elasticsearch.yml +[config-kbn]: ../../kibana/config/kibana.yml diff --git a/extensions/enterprise-search/config/enterprise-search.yml b/extensions/enterprise-search/config/enterprise-search.yml new file mode 100644 index 0000000..a1f098d --- /dev/null +++ b/extensions/enterprise-search/config/enterprise-search.yml @@ -0,0 +1,28 @@ +--- +## Enterprise Search core 
configuration +## https://www.elastic.co/guide/en/enterprise-search/current/configuration.html +# + +## --------------------- REQUIRED --------------------- + +# Encryption keys to protect application secrets. +secret_management.encryption_keys: + # example: + #- 680f94e568c90364bedf927b2f0f49609702d3eab9098688585a375b14274546 + +## ---------------------------------------------------- + +# IP address Enterprise Search listens on +ent_search.listen_host: 0.0.0.0 + +# URL at which users reach Enterprise Search / Kibana +ent_search.external_url: http://localhost:3002 +kibana.host: http://localhost:5601 + +# Elasticsearch URL and credentials +elasticsearch.host: http://elasticsearch:9200 +elasticsearch.username: elastic +elasticsearch.password: ${ELASTIC_PASSWORD} + +# Allow Enterprise Search to modify Elasticsearch settings. Used to enable auto-creation of Elasticsearch indexes. +allow_es_settings_modification: true diff --git a/extensions/enterprise-search/enterprise-search-compose.yml b/extensions/enterprise-search/enterprise-search-compose.yml new file mode 100644 index 0000000..84a0011 --- /dev/null +++ b/extensions/enterprise-search/enterprise-search-compose.yml @@ -0,0 +1,18 @@ +services: + enterprise-search: + build: + context: extensions/enterprise-search/ + args: + ELASTIC_VERSION: ${ELASTIC_VERSION} + volumes: + - ./extensions/enterprise-search/config/enterprise-search.yml:/usr/share/enterprise-search/config/enterprise-search.yml:ro,Z + environment: + JAVA_OPTS: -Xms2g -Xmx2g + ENT_SEARCH_DEFAULT_PASSWORD: 'changeme' + ELASTIC_PASSWORD: ${ELASTIC_PASSWORD:-} + ports: + - 3002:3002 + networks: + - elk + depends_on: + - elasticsearch diff --git a/extensions/filebeat/.dockerignore b/extensions/filebeat/.dockerignore new file mode 100644 index 0000000..37eef9d --- /dev/null +++ b/extensions/filebeat/.dockerignore @@ -0,0 +1,6 @@ +# Ignore Docker build files +Dockerfile +.dockerignore + +# Ignore OS artifacts +**/.DS_Store diff --git 
a/extensions/filebeat/Dockerfile b/extensions/filebeat/Dockerfile new file mode 100644 index 0000000..d1001bd --- /dev/null +++ b/extensions/filebeat/Dockerfile @@ -0,0 +1,3 @@ +ARG ELASTIC_VERSION + +FROM docker.elastic.co/beats/filebeat:${ELASTIC_VERSION:-8.15.1} diff --git a/extensions/filebeat/README.md b/extensions/filebeat/README.md new file mode 100644 index 0000000..b1fcb09 --- /dev/null +++ b/extensions/filebeat/README.md @@ -0,0 +1,42 @@ +# Filebeat + +Filebeat is a lightweight shipper for forwarding and centralizing log data. Installed as an agent on your servers, +Filebeat monitors the log files or locations that you specify, collects log events, and forwards them either to +Elasticsearch or Logstash for indexing. + +## Usage + +**This extension requires the `filebeat_internal` and `beats_system` users to be created and initialized with a +password.** In case you haven't done that during the initial startup of the stack, please refer to [How to re-execute +the setup][setup] to run the setup container again and initialize these users. + +To include Filebeat in the stack, run Docker Compose from the root of the repository with an additional command line +argument referencing the `filebeat-compose.yml` file: + +```console +$ docker compose -f docker-compose.yml -f extensions/filebeat/filebeat-compose.yml up +``` + +## Configuring Filebeat + +The Filebeat configuration is stored in [`config/filebeat.yml`](./config/filebeat.yml). You can modify this file with +the help of the [Configuration reference][filebeat-config]. + +Any change to the Filebeat configuration requires a restart of the Filebeat container: + +```console +$ docker compose -f docker-compose.yml -f extensions/filebeat/filebeat-compose.yml restart filebeat +``` + +Please refer to the following documentation page for more details about how to configure Filebeat inside a Docker +container: [Run Filebeat on Docker][filebeat-docker]. 
+ +## See also + +[Filebeat documentation][filebeat-doc] + +[filebeat-config]: https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-reference-yml.html +[filebeat-docker]: https://www.elastic.co/guide/en/beats/filebeat/current/running-on-docker.html +[filebeat-doc]: https://www.elastic.co/guide/en/beats/filebeat/current/index.html + +[setup]: ../../README.md#how-to-re-execute-the-setup diff --git a/extensions/filebeat/config/filebeat.yml b/extensions/filebeat/config/filebeat.yml new file mode 100644 index 0000000..119d5d5 --- /dev/null +++ b/extensions/filebeat/config/filebeat.yml @@ -0,0 +1,54 @@ +## Filebeat configuration +## https://github.com/elastic/beats/blob/main/deploy/docker/filebeat.docker.yml +# + +name: filebeat + +filebeat.config: + modules: + path: ${path.config}/modules.d/*.yml + reload.enabled: false + +filebeat.autodiscover: + providers: + # The Docker autodiscover provider automatically retrieves logs from Docker + # containers as they start and stop. + - type: docker + hints.enabled: true + hints.default_config: + type: container + paths: + - /var/lib/docker/containers/${data.container.id}/*-json.log + templates: + - condition: + contains: + docker.container.image: elasticsearch + config: + - module: elasticsearch + server: + input: + type: container + paths: + - /var/lib/docker/containers/${data.container.id}/*-json.log + +processors: + - add_cloud_metadata: ~ + +monitoring: + enabled: true + elasticsearch: + username: beats_system + password: ${BEATS_SYSTEM_PASSWORD} + +output.elasticsearch: + hosts: [ http://elasticsearch:9200 ] + username: filebeat_internal + password: ${FILEBEAT_INTERNAL_PASSWORD} + +## HTTP endpoint for health checking +## https://www.elastic.co/guide/en/beats/filebeat/current/http-endpoint.html +# + +http: + enabled: true + host: 0.0.0.0 diff --git a/extensions/filebeat/filebeat-compose.yml b/extensions/filebeat/filebeat-compose.yml new file mode 100644 index 0000000..595a7d4 --- /dev/null +++ 
b/extensions/filebeat/filebeat-compose.yml @@ -0,0 +1,33 @@ +services: + filebeat: + build: + context: extensions/filebeat/ + args: + ELASTIC_VERSION: ${ELASTIC_VERSION} + # Run as 'root' instead of 'filebeat' (uid 1000) to allow reading + # 'docker.sock' and the host's filesystem. + user: root + command: + # Log to stderr. + - -e + # Disable config file permissions checks. Allows mounting + # 'config/filebeat.yml' even if it's not owned by root. + # see: https://www.elastic.co/guide/en/beats/libbeat/current/config-file-permissions.html + - --strict.perms=false + volumes: + - ./extensions/filebeat/config/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro,Z + - type: bind + source: /var/lib/docker/containers + target: /var/lib/docker/containers + read_only: true + - type: bind + source: /var/run/docker.sock + target: /var/run/docker.sock + read_only: true + environment: + FILEBEAT_INTERNAL_PASSWORD: ${FILEBEAT_INTERNAL_PASSWORD:-} + BEATS_SYSTEM_PASSWORD: ${BEATS_SYSTEM_PASSWORD:-} + networks: + - elk + depends_on: + - elasticsearch diff --git a/extensions/fleet/.dockerignore b/extensions/fleet/.dockerignore new file mode 100644 index 0000000..37eef9d --- /dev/null +++ b/extensions/fleet/.dockerignore @@ -0,0 +1,6 @@ +# Ignore Docker build files +Dockerfile +.dockerignore + +# Ignore OS artifacts +**/.DS_Store diff --git a/extensions/fleet/Dockerfile b/extensions/fleet/Dockerfile new file mode 100644 index 0000000..acfe9da --- /dev/null +++ b/extensions/fleet/Dockerfile @@ -0,0 +1,8 @@ +ARG ELASTIC_VERSION + +FROM docker.elastic.co/beats/elastic-agent:${ELASTIC_VERSION:-8.15.1} + +# Ensure the 'state' directory exists and is owned by the 'elastic-agent' user, +# otherwise mounting a named volume in that location creates a directory owned +# by root:root which the 'elastic-agent' user isn't allowed to write to. 
+RUN mkdir state diff --git a/extensions/fleet/README.md b/extensions/fleet/README.md new file mode 100644 index 0000000..bd8efde --- /dev/null +++ b/extensions/fleet/README.md @@ -0,0 +1,62 @@ +# Fleet Server + +> [!WARNING] +> This extension currently exists for preview purposes and should be considered **EXPERIMENTAL**. Expect regular changes +> to the default Fleet settings, both in the Elastic Agent and Kibana. +> +> See [Known Issues](#known-issues) for a list of issues that need to be addressed before this extension can be +> considered functional. + +Fleet provides central management capabilities for [Elastic Agents][fleet-doc] via an API and web UI served by Kibana, +with Elasticsearch acting as the communication layer. +Fleet Server is the central component which allows connecting Elastic Agents to the Fleet. + +## Requirements + +The Fleet Server exposes the TCP port `8220` for Agent to Server communications. + +## Usage + +To include Fleet Server in the stack, run Docker Compose from the root of the repository with an additional command line +argument referencing the `fleet-compose.yml` file: + +```console +$ docker compose -f docker-compose.yml -f extensions/fleet/fleet-compose.yml up +``` + +## Configuring Fleet Server + +Fleet Server — like any Elastic Agent — is configured via [Agent Policies][fleet-pol] which can be either managed +through the Fleet management UI in Kibana, or statically pre-configured inside the Kibana configuration file. + +To ease the enrollment of Fleet Server in this extension, docker-elk comes with a pre-configured Agent Policy for Fleet +Server defined inside [`kibana/config/kibana.yml`][config-kbn]. + +Please refer to the following documentation page for more details about configuring Fleet Server through the Fleet +management UI: [Fleet UI Settings][fleet-cfg]. + +## Known Issues + +- The Elastic Agent auto-enrolls using the `elastic` super-user. 
With this approach, you do not need to generate a + service token — either using the Fleet management UI or [CLI utility][es-svc-token] — prior to starting this + extension. However convenient that is, this approach _does not follow security best practices_, and we recommend + generating a service token for Fleet Server instead. + +## See also + +[Fleet and Elastic Agent Guide][fleet-doc] + +## Screenshots + +![fleet-agents](https://user-images.githubusercontent.com/3299086/202701399-27518fe4-17b7-49d1-aefb-868dffeaa68a.png +"Fleet Agents") +![elastic-agent-dashboard](https://user-images.githubusercontent.com/3299086/202701404-958f8d80-a7a0-4044-bbf9-bf73f3bdd17a.png +"Elastic Agent Dashboard") + +[fleet-doc]: https://www.elastic.co/guide/en/fleet/current/fleet-overview.html +[fleet-pol]: https://www.elastic.co/guide/en/fleet/current/agent-policy.html +[fleet-cfg]: https://www.elastic.co/guide/en/fleet/current/fleet-settings.html + +[config-kbn]: ../../kibana/config/kibana.yml + +[es-svc-token]: https://www.elastic.co/guide/en/elasticsearch/reference/current/service-tokens-command.html diff --git a/extensions/fleet/agent-apmserver-compose.yml b/extensions/fleet/agent-apmserver-compose.yml new file mode 100644 index 0000000..3becc09 --- /dev/null +++ b/extensions/fleet/agent-apmserver-compose.yml @@ -0,0 +1,43 @@ +# Example of Fleet-enrolled Elastic Agent pre-configured with an agent policy +# for running the APM Server integration (see kibana.yml). +# +# Run with +# docker compose \ +# -f docker-compose.yml \ +# -f extensions/fleet/fleet-compose.yml \ +# -f extensions/fleet/agent-apmserver-compose.yml \ +# up + +services: + apm-server: + build: + context: extensions/fleet/ + args: + ELASTIC_VERSION: ${ELASTIC_VERSION} + volumes: + - apm-server:/usr/share/elastic-agent/state:Z + environment: + FLEET_ENROLL: '1' + FLEET_TOKEN_POLICY_NAME: Agent Policy APM Server + FLEET_INSECURE: '1' + FLEET_URL: http://fleet-server:8220 + # Enrollment. 
+ # (a) Auto-enroll using basic authentication + ELASTICSEARCH_USERNAME: elastic + ELASTICSEARCH_PASSWORD: ${ELASTIC_PASSWORD:-} + # (b) Enroll using a pre-generated enrollment token + #FLEET_ENROLLMENT_TOKEN: + ports: + - 8200:8200 + hostname: apm-server + # Elastic Agent does not retry failed connections to Kibana upon the initial enrollment phase. + restart: on-failure + networks: + - elk + depends_on: + - elasticsearch + - kibana + - fleet-server + +volumes: + apm-server: diff --git a/extensions/fleet/fleet-compose.yml b/extensions/fleet/fleet-compose.yml new file mode 100644 index 0000000..a81d33b --- /dev/null +++ b/extensions/fleet/fleet-compose.yml @@ -0,0 +1,45 @@ +services: + fleet-server: + build: + context: extensions/fleet/ + args: + ELASTIC_VERSION: ${ELASTIC_VERSION} + # Run as 'root' instead of 'elastic-agent' (uid 1000) to allow reading + # 'docker.sock' and the host's filesystem. + user: root + volumes: + - fleet-server:/usr/share/elastic-agent/state:Z + - type: bind + source: /var/lib/docker/containers + target: /var/lib/docker/containers + read_only: true + - type: bind + source: /var/run/docker.sock + target: /var/run/docker.sock + read_only: true + environment: + FLEET_SERVER_ENABLE: '1' + FLEET_SERVER_INSECURE_HTTP: '1' + FLEET_SERVER_HOST: 0.0.0.0 + FLEET_SERVER_POLICY_ID: fleet-server-policy + # Fleet plugin in Kibana + KIBANA_FLEET_SETUP: '1' + # Enrollment. + # (a) Auto-enroll using basic authentication + ELASTICSEARCH_USERNAME: elastic + ELASTICSEARCH_PASSWORD: ${ELASTIC_PASSWORD:-} + # (b) Enroll using a pre-generated service token + #FLEET_SERVER_SERVICE_TOKEN: + ports: + - 8220:8220 + hostname: fleet-server + # Elastic Agent does not retry failed connections to Kibana upon the initial enrollment phase. 
+ restart: on-failure + networks: + - elk + depends_on: + - elasticsearch + - kibana + +volumes: + fleet-server: diff --git a/extensions/heartbeat/.dockerignore b/extensions/heartbeat/.dockerignore new file mode 100644 index 0000000..37eef9d --- /dev/null +++ b/extensions/heartbeat/.dockerignore @@ -0,0 +1,6 @@ +# Ignore Docker build files +Dockerfile +.dockerignore + +# Ignore OS artifacts +**/.DS_Store diff --git a/extensions/heartbeat/Dockerfile b/extensions/heartbeat/Dockerfile new file mode 100644 index 0000000..7731d66 --- /dev/null +++ b/extensions/heartbeat/Dockerfile @@ -0,0 +1,3 @@ +ARG ELASTIC_VERSION + +FROM docker.elastic.co/beats/heartbeat:${ELASTIC_VERSION:-8.15.1} diff --git a/extensions/heartbeat/README.md b/extensions/heartbeat/README.md new file mode 100644 index 0000000..64a761b --- /dev/null +++ b/extensions/heartbeat/README.md @@ -0,0 +1,41 @@ +# Heartbeat + +Heartbeat is a lightweight daemon that periodically checks the status of your services and determines whether they are +available. + +## Usage + +**This extension requires the `heartbeat_internal` and `beats_system` users to be created and initialized with a +password.** In case you haven't done that during the initial startup of the stack, please refer to [How to re-execute +the setup][setup] to run the setup container again and initialize these users. + +To include Heartbeat in the stack, run Docker Compose from the root of the repository with an additional command line +argument referencing the `heartbeat-compose.yml` file: + +```console +$ docker compose -f docker-compose.yml -f extensions/heartbeat/heartbeat-compose.yml up +``` + +## Configuring Heartbeat + +The Heartbeat configuration is stored in [`config/heartbeat.yml`](./config/heartbeat.yml). You can modify this file +with the help of the [Configuration reference][heartbeat-config]. 
+ +Any change to the Heartbeat configuration requires a restart of the Heartbeat container: + +```console +$ docker compose -f docker-compose.yml -f extensions/heartbeat/heartbeat-compose.yml restart heartbeat +``` + +Please refer to the following documentation page for more details about how to configure Heartbeat inside a +Docker container: [Run Heartbeat on Docker][heartbeat-docker]. + +## See also + +[Heartbeat documentation][heartbeat-doc] + +[heartbeat-config]: https://www.elastic.co/guide/en/beats/heartbeat/current/heartbeat-reference-yml.html +[heartbeat-docker]: https://www.elastic.co/guide/en/beats/heartbeat/current/running-on-docker.html +[heartbeat-doc]: https://www.elastic.co/guide/en/beats/heartbeat/current/index.html + +[setup]: ../../README.md#how-to-re-execute-the-setup diff --git a/extensions/heartbeat/config/heartbeat.yml b/extensions/heartbeat/config/heartbeat.yml new file mode 100644 index 0000000..b1416ea --- /dev/null +++ b/extensions/heartbeat/config/heartbeat.yml @@ -0,0 +1,40 @@ +## Heartbeat configuration +## https://github.com/elastic/beats/blob/main/deploy/docker/heartbeat.docker.yml +# + +name: heartbeat + +heartbeat.monitors: +- type: http + schedule: '@every 5s' + urls: + - http://elasticsearch:9200 + username: heartbeat_internal + password: ${HEARTBEAT_INTERNAL_PASSWORD} + +- type: icmp + schedule: '@every 5s' + hosts: + - elasticsearch + +processors: +- add_cloud_metadata: ~ + +monitoring: + enabled: true + elasticsearch: + username: beats_system + password: ${BEATS_SYSTEM_PASSWORD} + +output.elasticsearch: + hosts: [ http://elasticsearch:9200 ] + username: heartbeat_internal + password: ${HEARTBEAT_INTERNAL_PASSWORD} + +## HTTP endpoint for health checking +## https://www.elastic.co/guide/en/beats/heartbeat/current/http-endpoint.html +# + +http: + enabled: true + host: 0.0.0.0 diff --git a/extensions/heartbeat/heartbeat-compose.yml b/extensions/heartbeat/heartbeat-compose.yml new file mode 100644 index 0000000..103d0df --- 
/dev/null +++ b/extensions/heartbeat/heartbeat-compose.yml @@ -0,0 +1,22 @@ +services: + heartbeat: + build: + context: extensions/heartbeat/ + args: + ELASTIC_VERSION: ${ELASTIC_VERSION} + command: + # Log to stderr. + - -e + # Disable config file permissions checks. Allows mounting + # 'config/heartbeat.yml' even if it's not owned by root. + # see: https://www.elastic.co/guide/en/beats/libbeat/current/config-file-permissions.html + - --strict.perms=false + volumes: + - ./extensions/heartbeat/config/heartbeat.yml:/usr/share/heartbeat/heartbeat.yml:ro,Z + environment: + HEARTBEAT_INTERNAL_PASSWORD: ${HEARTBEAT_INTERNAL_PASSWORD:-} + BEATS_SYSTEM_PASSWORD: ${BEATS_SYSTEM_PASSWORD:-} + networks: + - elk + depends_on: + - elasticsearch diff --git a/extensions/metricbeat/.dockerignore b/extensions/metricbeat/.dockerignore new file mode 100644 index 0000000..37eef9d --- /dev/null +++ b/extensions/metricbeat/.dockerignore @@ -0,0 +1,6 @@ +# Ignore Docker build files +Dockerfile +.dockerignore + +# Ignore OS artifacts +**/.DS_Store diff --git a/extensions/metricbeat/Dockerfile b/extensions/metricbeat/Dockerfile new file mode 100644 index 0000000..b9313e4 --- /dev/null +++ b/extensions/metricbeat/Dockerfile @@ -0,0 +1,3 @@ +ARG ELASTIC_VERSION + +FROM docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION:-8.15.1} diff --git a/extensions/metricbeat/README.md b/extensions/metricbeat/README.md new file mode 100644 index 0000000..2d042ba --- /dev/null +++ b/extensions/metricbeat/README.md @@ -0,0 +1,49 @@ +# Metricbeat + +Metricbeat is a lightweight shipper that you can install on your servers to periodically collect metrics from the +operating system and from services running on the server. Metricbeat takes the metrics and statistics that it collects +and ships them to the output that you specify, such as Elasticsearch or Logstash. 
+ +## Usage + +**This extension requires the `metricbeat_internal`, `monitoring_internal` and `beats_system` users to be created and +initialized with a password.** In case you haven't done that during the initial startup of the stack, please refer to +[How to re-execute the setup][setup] to run the setup container again and initialize these users. + +To include Metricbeat in the stack, run Docker Compose from the root of the repository with an additional command line +argument referencing the `metricbeat-compose.yml` file: + +```console +$ docker compose -f docker-compose.yml -f extensions/metricbeat/metricbeat-compose.yml up +``` + +## Configuring Metricbeat + +The Metricbeat configuration is stored in [`config/metricbeat.yml`](./config/metricbeat.yml). You can modify this file +with the help of the [Configuration reference][metricbeat-config]. + +Any change to the Metricbeat configuration requires a restart of the Metricbeat container: + +```console +$ docker compose -f docker-compose.yml -f extensions/metricbeat/metricbeat-compose.yml restart metricbeat +``` + +Please refer to the following documentation page for more details about how to configure Metricbeat inside a +Docker container: [Run Metricbeat on Docker][metricbeat-docker]. 
+ +## See also + +[Metricbeat documentation][metricbeat-doc] + +## Screenshots + +![stack-monitoring](https://user-images.githubusercontent.com/3299086/202710574-32a3d419-86ea-4334-b6f7-62d7826df18d.png +"Stack Monitoring") +![host-dashboard](https://user-images.githubusercontent.com/3299086/202710594-0deccf40-3a9a-4e63-8411-2e0d9cc6ad3a.png +"Host Overview Dashboard") + +[metricbeat-config]: https://www.elastic.co/guide/en/beats/metricbeat/current/metricbeat-reference-yml.html +[metricbeat-docker]: https://www.elastic.co/guide/en/beats/metricbeat/current/running-on-docker.html +[metricbeat-doc]: https://www.elastic.co/guide/en/beats/metricbeat/current/index.html + +[setup]: ../../README.md#how-to-re-execute-the-setup diff --git a/extensions/metricbeat/config/metricbeat.yml b/extensions/metricbeat/config/metricbeat.yml new file mode 100644 index 0000000..1c2b6cb --- /dev/null +++ b/extensions/metricbeat/config/metricbeat.yml @@ -0,0 +1,72 @@ +## Metricbeat configuration +## https://github.com/elastic/beats/blob/main/deploy/docker/metricbeat.docker.yml +# + +name: metricbeat + +metricbeat.config: + modules: + path: ${path.config}/modules.d/*.yml + # Reload module configs as they change: + reload.enabled: false + +metricbeat.autodiscover: + providers: + - type: docker + hints.enabled: true + +metricbeat.modules: +- module: elasticsearch + hosts: [ http://elasticsearch:9200 ] + username: monitoring_internal + password: ${MONITORING_INTERNAL_PASSWORD} + xpack.enabled: true + period: 10s + enabled: true +- module: logstash + hosts: [ http://logstash:9600 ] + xpack.enabled: true + period: 10s + enabled: true +- module: kibana + hosts: [ http://kibana:5601 ] + username: monitoring_internal + password: ${MONITORING_INTERNAL_PASSWORD} + xpack.enabled: true + period: 10s + enabled: true +- module: docker + metricsets: + - container + - cpu + - diskio + - healthcheck + - info + #- image + - memory + - network + hosts: [ unix:///var/run/docker.sock ] + period: 10s + enabled: 
true + +processors: + - add_cloud_metadata: ~ + +monitoring: + enabled: true + elasticsearch: + username: beats_system + password: ${BEATS_SYSTEM_PASSWORD} + +output.elasticsearch: + hosts: [ http://elasticsearch:9200 ] + username: metricbeat_internal + password: ${METRICBEAT_INTERNAL_PASSWORD} + +## HTTP endpoint for health checking +## https://www.elastic.co/guide/en/beats/metricbeat/current/http-endpoint.html +# + +http: + enabled: true + host: 0.0.0.0 diff --git a/extensions/metricbeat/metricbeat-compose.yml b/extensions/metricbeat/metricbeat-compose.yml new file mode 100644 index 0000000..e7aa67f --- /dev/null +++ b/extensions/metricbeat/metricbeat-compose.yml @@ -0,0 +1,45 @@ +services: + metricbeat: + build: + context: extensions/metricbeat/ + args: + ELASTIC_VERSION: ${ELASTIC_VERSION} + # Run as 'root' instead of 'metricbeat' (uid 1000) to allow reading + # 'docker.sock' and the host's filesystem. + user: root + command: + # Log to stderr. + - -e + # Disable config file permissions checks. Allows mounting + # 'config/metricbeat.yml' even if it's not owned by root. + # see: https://www.elastic.co/guide/en/beats/libbeat/current/config-file-permissions.html + - --strict.perms=false + # Mount point of the host’s filesystem. Required to monitor the host + # from within a container. 
+ - --system.hostfs=/hostfs + volumes: + - ./extensions/metricbeat/config/metricbeat.yml:/usr/share/metricbeat/metricbeat.yml:ro,Z + - type: bind + source: / + target: /hostfs + read_only: true + - type: bind + source: /sys/fs/cgroup + target: /hostfs/sys/fs/cgroup + read_only: true + - type: bind + source: /proc + target: /hostfs/proc + read_only: true + - type: bind + source: /var/run/docker.sock + target: /var/run/docker.sock + read_only: true + environment: + METRICBEAT_INTERNAL_PASSWORD: ${METRICBEAT_INTERNAL_PASSWORD:-} + MONITORING_INTERNAL_PASSWORD: ${MONITORING_INTERNAL_PASSWORD:-} + BEATS_SYSTEM_PASSWORD: ${BEATS_SYSTEM_PASSWORD:-} + networks: + - elk + depends_on: + - elasticsearch diff --git a/kibana/.dockerignore b/kibana/.dockerignore new file mode 100644 index 0000000..37eef9d --- /dev/null +++ b/kibana/.dockerignore @@ -0,0 +1,6 @@ +# Ignore Docker build files +Dockerfile +.dockerignore + +# Ignore OS artifacts +**/.DS_Store diff --git a/kibana/Dockerfile b/kibana/Dockerfile new file mode 100644 index 0000000..7e4c501 --- /dev/null +++ b/kibana/Dockerfile @@ -0,0 +1,7 @@ +ARG ELASTIC_VERSION + +# https://www.docker.elastic.co/ +FROM docker.elastic.co/kibana/kibana:${ELASTIC_VERSION:-8.15.1} + +# Add your kibana plugins setup here +# Example: RUN kibana-plugin install diff --git a/kibana/config/kibana.yml b/kibana/config/kibana.yml new file mode 100644 index 0000000..ef3f024 --- /dev/null +++ b/kibana/config/kibana.yml @@ -0,0 +1,99 @@ +--- +## Default Kibana configuration from Kibana base image. 
+## https://github.com/elastic/kibana/blob/main/src/dev/build/tasks/os_packages/docker_generator/templates/kibana_yml.template.ts +# +server.name: kibana +server.host: 0.0.0.0 +elasticsearch.hosts: [ http://elasticsearch:9200 ] + +monitoring.ui.container.elasticsearch.enabled: true +monitoring.ui.container.logstash.enabled: true + +## X-Pack security credentials +# +elasticsearch.username: kibana_system +elasticsearch.password: ${KIBANA_SYSTEM_PASSWORD} + +## Encryption keys (optional but highly recommended) +## +## Generate with either +## $ docker container run --rm docker.elastic.co/kibana/kibana:8.6.2 bin/kibana-encryption-keys generate +## $ openssl rand -hex 32 +## +## https://www.elastic.co/guide/en/kibana/current/using-kibana-with-security.html +## https://www.elastic.co/guide/en/kibana/current/kibana-encryption-keys.html +# +#xpack.security.encryptionKey: +#xpack.encryptedSavedObjects.encryptionKey: +#xpack.reporting.encryptionKey: + +## Fleet +## https://www.elastic.co/guide/en/kibana/current/fleet-settings-kb.html +# +xpack.fleet.agents.fleet_server.hosts: [ http://fleet-server:8220 ] + +xpack.fleet.outputs: + - id: fleet-default-output + name: default + type: elasticsearch + hosts: [ http://elasticsearch:9200 ] + is_default: true + is_default_monitoring: true + +xpack.fleet.packages: + - name: fleet_server + version: latest + - name: system + version: latest + - name: elastic_agent + version: latest + - name: docker + version: latest + - name: apm + version: latest + +xpack.fleet.agentPolicies: + - name: Fleet Server Policy + id: fleet-server-policy + description: Static agent policy for Fleet Server + monitoring_enabled: + - logs + - metrics + package_policies: + - name: fleet_server-1 + package: + name: fleet_server + - name: system-1 + package: + name: system + - name: elastic_agent-1 + package: + name: elastic_agent + - name: docker-1 + package: + name: docker + - name: Agent Policy APM Server + id: agent-policy-apm-server + description: Static 
agent policy for the APM Server integration + monitoring_enabled: + - logs + - metrics + package_policies: + - name: system-1 + package: + name: system + - name: elastic_agent-1 + package: + name: elastic_agent + - name: apm-1 + package: + name: apm + # See the APM package manifest for a list of possible inputs. + # https://github.com/elastic/apm-server/blob/v8.5.0/apmpackage/apm/manifest.yml#L41-L168 + inputs: + - type: apm + vars: + - name: host + value: 0.0.0.0:8200 + - name: url + value: http://apm-server:8200 diff --git a/logstash/.dockerignore b/logstash/.dockerignore new file mode 100644 index 0000000..37eef9d --- /dev/null +++ b/logstash/.dockerignore @@ -0,0 +1,6 @@ +# Ignore Docker build files +Dockerfile +.dockerignore + +# Ignore OS artifacts +**/.DS_Store diff --git a/logstash/Dockerfile b/logstash/Dockerfile new file mode 100644 index 0000000..348ca41 --- /dev/null +++ b/logstash/Dockerfile @@ -0,0 +1,7 @@ +ARG ELASTIC_VERSION + +# https://www.docker.elastic.co/ +FROM docker.elastic.co/logstash/logstash:${ELASTIC_VERSION:-8.15.1} + +# Add your logstash plugins setup here +# Example: RUN logstash-plugin install logstash-filter-json diff --git a/logstash/config/logstash.yml b/logstash/config/logstash.yml new file mode 100644 index 0000000..a81b89b --- /dev/null +++ b/logstash/config/logstash.yml @@ -0,0 +1,7 @@ +--- +## Default Logstash configuration from Logstash base image. 
+## https://github.com/elastic/logstash/blob/main/docker/data/logstash/config/logstash-full.yml +# +http.host: 0.0.0.0 + +node.name: logstash diff --git a/logstash/pipeline/logstash.conf b/logstash/pipeline/logstash.conf new file mode 100644 index 0000000..5ac8861 --- /dev/null +++ b/logstash/pipeline/logstash.conf @@ -0,0 +1,19 @@ +input { + beats { + port => 5044 + } + + tcp { + port => 50000 + } +} + +## Add your filters / logstash plugins configuration here + +output { + elasticsearch { + hosts => "elasticsearch:9200" + user => "logstash_internal" + password => "${LOGSTASH_INTERNAL_PASSWORD}" + } +} diff --git a/setup/.dockerignore b/setup/.dockerignore new file mode 100644 index 0000000..c5dd1c8 --- /dev/null +++ b/setup/.dockerignore @@ -0,0 +1,9 @@ +# Ignore Docker build files +Dockerfile +.dockerignore + +# Ignore OS artifacts +**/.DS_Store + +# Ignore Git files +.gitignore diff --git a/setup/Dockerfile b/setup/Dockerfile new file mode 100644 index 0000000..86254a1 --- /dev/null +++ b/setup/Dockerfile @@ -0,0 +1,6 @@ +ARG ELASTIC_VERSION + +# https://www.docker.elastic.co/ +FROM docker.elastic.co/elasticsearch/elasticsearch:${ELASTIC_VERSION:-8.15.1} + +ENTRYPOINT ["/entrypoint.sh"] diff --git a/setup/entrypoint.sh b/setup/entrypoint.sh new file mode 100755 index 0000000..ac79321 --- /dev/null +++ b/setup/entrypoint.sh @@ -0,0 +1,119 @@ +#!/usr/bin/env bash + +set -eu +set -o pipefail + +source "${BASH_SOURCE[0]%/*}"/lib.sh + + +# -------------------------------------------------------- +# Users declarations + +declare -A users_passwords +users_passwords=( + [logstash_internal]="${LOGSTASH_INTERNAL_PASSWORD:-}" + [kibana_system]="${KIBANA_SYSTEM_PASSWORD:-}" + [metricbeat_internal]="${METRICBEAT_INTERNAL_PASSWORD:-}" + [filebeat_internal]="${FILEBEAT_INTERNAL_PASSWORD:-}" + [heartbeat_internal]="${HEARTBEAT_INTERNAL_PASSWORD:-}" + [monitoring_internal]="${MONITORING_INTERNAL_PASSWORD:-}" + [beats_system]="${BEATS_SYSTEM_PASSWORD:-}" +) + +declare -A 
users_roles +users_roles=( + [logstash_internal]='logstash_writer' + [metricbeat_internal]='metricbeat_writer' + [filebeat_internal]='filebeat_writer' + [heartbeat_internal]='heartbeat_writer' + [monitoring_internal]='remote_monitoring_collector' +) + +# -------------------------------------------------------- +# Roles declarations + +declare -A roles_files +roles_files=( + [logstash_writer]='logstash_writer.json' + [metricbeat_writer]='metricbeat_writer.json' + [filebeat_writer]='filebeat_writer.json' + [heartbeat_writer]='heartbeat_writer.json' +) + +# -------------------------------------------------------- + + +log 'Waiting for availability of Elasticsearch. This can take several minutes.' + +declare -i exit_code=0 +wait_for_elasticsearch || exit_code=$? + +if ((exit_code)); then + case $exit_code in + 6) + suberr 'Could not resolve host. Is Elasticsearch running?' + ;; + 7) + suberr 'Failed to connect to host. Is Elasticsearch healthy?' + ;; + 28) + suberr 'Timeout connecting to host. Is Elasticsearch healthy?' + ;; + *) + suberr "Connection to Elasticsearch failed. Exit code: ${exit_code}" + ;; + esac + + exit $exit_code +fi + +sublog 'Elasticsearch is running' + +log 'Waiting for initialization of built-in users' + +wait_for_builtin_users || exit_code=$? + +if ((exit_code)); then + suberr 'Timed out waiting for condition' + exit $exit_code +fi + +sublog 'Built-in users were initialized' + +for role in "${!roles_files[@]}"; do + log "Role '$role'" + + declare body_file + body_file="${BASH_SOURCE[0]%/*}/roles/${roles_files[$role]:-}" + if [[ ! 
-f "${body_file:-}" ]]; then + sublog "No role body found at '${body_file}', skipping" + continue + fi + + sublog 'Creating/updating' + ensure_role "$role" "$(<"${body_file}")" +done + +for user in "${!users_passwords[@]}"; do + log "User '$user'" + if [[ -z "${users_passwords[$user]:-}" ]]; then + sublog 'No password defined, skipping' + continue + fi + + declare -i user_exists=0 + user_exists="$(check_user_exists "$user")" + + if ((user_exists)); then + sublog 'User exists, setting password' + set_user_password "$user" "${users_passwords[$user]}" + else + if [[ -z "${users_roles[$user]:-}" ]]; then + suberr ' No role defined, skipping creation' + continue + fi + + sublog 'User does not exist, creating' + create_user "$user" "${users_passwords[$user]}" "${users_roles[$user]}" + fi +done diff --git a/setup/lib.sh b/setup/lib.sh new file mode 100644 index 0000000..7e635c6 --- /dev/null +++ b/setup/lib.sh @@ -0,0 +1,240 @@ +#!/usr/bin/env bash + +# Log a message. +function log { + echo "[+] $1" +} + +# Log a message at a sub-level. +function sublog { + echo " ⠿ $1" +} + +# Log an error. +function err { + echo "[x] $1" >&2 +} + +# Log an error at a sub-level. +function suberr { + echo " ⠍ $1" >&2 +} + +# Poll the 'elasticsearch' service until it responds with HTTP code 200. +function wait_for_elasticsearch { + local elasticsearch_host="${ELASTICSEARCH_HOST:-elasticsearch}" + + local -a args=( '-s' '-D-' '-m15' '-w' '%{http_code}' "http://${elasticsearch_host}:9200/" ) + + if [[ -n "${ELASTIC_PASSWORD:-}" ]]; then + args+=( '-u' "elastic:${ELASTIC_PASSWORD}" ) + fi + + local -i result=1 + local output + + # retry for max 300s (60*5s) + for _ in $(seq 1 60); do + local -i exit_code=0 + output="$(curl "${args[@]}")" || exit_code=$? 
+ + if ((exit_code)); then + result=$exit_code + fi + + if [[ "${output: -3}" -eq 200 ]]; then + result=0 + break + fi + + sleep 5 + done + + if ((result)) && [[ "${output: -3}" -ne 000 ]]; then + echo -e "\n${output::-3}" + fi + + return $result +} + +# Poll the Elasticsearch users API until it returns users. +function wait_for_builtin_users { + local elasticsearch_host="${ELASTICSEARCH_HOST:-elasticsearch}" + + local -a args=( '-s' '-D-' '-m15' "http://${elasticsearch_host}:9200/_security/user?pretty" ) + + if [[ -n "${ELASTIC_PASSWORD:-}" ]]; then + args+=( '-u' "elastic:${ELASTIC_PASSWORD}" ) + fi + + local -i result=1 + + local line + local -i exit_code + local -i num_users + + # retry for max 30s (30*1s) + for _ in $(seq 1 30); do + num_users=0 + + # read exits with a non-zero code if the last read input doesn't end + # with a newline character. The printf without newline that follows the + # curl command ensures that the final input not only contains curl's + # exit code, but causes read to fail so we can capture the return value. + # Ref. https://unix.stackexchange.com/a/176703/152409 + while IFS= read -r line || ! exit_code="$line"; do + if [[ "$line" =~ _reserved.+true ]]; then + (( num_users++ )) + fi + done < <(curl "${args[@]}"; printf '%s' "$?") + + if ((exit_code)); then + result=$exit_code + fi + + # we expect more than just the 'elastic' user in the result + if (( num_users > 1 )); then + result=0 + break + fi + + sleep 1 + done + + return $result +} + +# Verify that the given Elasticsearch user exists. 
+function check_user_exists { + local username=$1 + + local elasticsearch_host="${ELASTICSEARCH_HOST:-elasticsearch}" + + local -a args=( '-s' '-D-' '-m15' '-w' '%{http_code}' + "http://${elasticsearch_host}:9200/_security/user/${username}" + ) + + if [[ -n "${ELASTIC_PASSWORD:-}" ]]; then + args+=( '-u' "elastic:${ELASTIC_PASSWORD}" ) + fi + + local -i result=1 + local -i exists=0 + local output + + output="$(curl "${args[@]}")" + if [[ "${output: -3}" -eq 200 || "${output: -3}" -eq 404 ]]; then + result=0 + fi + if [[ "${output: -3}" -eq 200 ]]; then + exists=1 + fi + + if ((result)); then + echo -e "\n${output::-3}" + else + echo "$exists" + fi + + return $result +} + +# Set password of a given Elasticsearch user. +function set_user_password { + local username=$1 + local password=$2 + + local elasticsearch_host="${ELASTICSEARCH_HOST:-elasticsearch}" + + local -a args=( '-s' '-D-' '-m15' '-w' '%{http_code}' + "http://${elasticsearch_host}:9200/_security/user/${username}/_password" + '-X' 'POST' + '-H' 'Content-Type: application/json' + '-d' "{\"password\" : \"${password}\"}" + ) + + if [[ -n "${ELASTIC_PASSWORD:-}" ]]; then + args+=( '-u' "elastic:${ELASTIC_PASSWORD}" ) + fi + + local -i result=1 + local output + + output="$(curl "${args[@]}")" + if [[ "${output: -3}" -eq 200 ]]; then + result=0 + fi + + if ((result)); then + echo -e "\n${output::-3}\n" + fi + + return $result +} + +# Create the given Elasticsearch user. 
+function create_user { + local username=$1 + local password=$2 + local role=$3 + + local elasticsearch_host="${ELASTICSEARCH_HOST:-elasticsearch}" + + local -a args=( '-s' '-D-' '-m15' '-w' '%{http_code}' + "http://${elasticsearch_host}:9200/_security/user/${username}" + '-X' 'POST' + '-H' 'Content-Type: application/json' + '-d' "{\"password\":\"${password}\",\"roles\":[\"${role}\"]}" + ) + + if [[ -n "${ELASTIC_PASSWORD:-}" ]]; then + args+=( '-u' "elastic:${ELASTIC_PASSWORD}" ) + fi + + local -i result=1 + local output + + output="$(curl "${args[@]}")" + if [[ "${output: -3}" -eq 200 ]]; then + result=0 + fi + + if ((result)); then + echo -e "\n${output::-3}\n" + fi + + return $result +} + +# Ensure that the given Elasticsearch role is up-to-date, create it if required. +function ensure_role { + local name=$1 + local body=$2 + + local elasticsearch_host="${ELASTICSEARCH_HOST:-elasticsearch}" + + local -a args=( '-s' '-D-' '-m15' '-w' '%{http_code}' + "http://${elasticsearch_host}:9200/_security/role/${name}" + '-X' 'POST' + '-H' 'Content-Type: application/json' + '-d' "$body" + ) + + if [[ -n "${ELASTIC_PASSWORD:-}" ]]; then + args+=( '-u' "elastic:${ELASTIC_PASSWORD}" ) + fi + + local -i result=1 + local output + + output="$(curl "${args[@]}")" + if [[ "${output: -3}" -eq 200 ]]; then + result=0 + fi + + if ((result)); then + echo -e "\n${output::-3}\n" + fi + + return $result +} diff --git a/setup/roles/filebeat_writer.json b/setup/roles/filebeat_writer.json new file mode 100644 index 0000000..b24b873 --- /dev/null +++ b/setup/roles/filebeat_writer.json @@ -0,0 +1,20 @@ +{ + "cluster": [ + "manage_ilm", + "manage_index_templates", + "manage_ingest_pipelines", + "monitor", + "read_pipeline" + ], + "indices": [ + { + "names": [ + "filebeat-*" + ], + "privileges": [ + "create_doc", + "manage" + ] + } + ] +} diff --git a/setup/roles/heartbeat_writer.json b/setup/roles/heartbeat_writer.json new file mode 100644 index 0000000..9f64fa8 --- /dev/null +++ 
b/setup/roles/heartbeat_writer.json @@ -0,0 +1,18 @@ +{ + "cluster": [ + "manage_ilm", + "manage_index_templates", + "monitor" + ], + "indices": [ + { + "names": [ + "heartbeat-*" + ], + "privileges": [ + "create_doc", + "manage" + ] + } + ] +} diff --git a/setup/roles/logstash_writer.json b/setup/roles/logstash_writer.json new file mode 100644 index 0000000..b43861f --- /dev/null +++ b/setup/roles/logstash_writer.json @@ -0,0 +1,33 @@ +{ + "cluster": [ + "manage_index_templates", + "monitor", + "manage_ilm" + ], + "indices": [ + { + "names": [ + "logs-generic-default", + "logstash-*", + "ecs-logstash-*" + ], + "privileges": [ + "write", + "create", + "create_index", + "manage", + "manage_ilm" + ] + }, + { + "names": [ + "logstash", + "ecs-logstash" + ], + "privileges": [ + "write", + "manage" + ] + } + ] +} diff --git a/setup/roles/metricbeat_writer.json b/setup/roles/metricbeat_writer.json new file mode 100644 index 0000000..279308c --- /dev/null +++ b/setup/roles/metricbeat_writer.json @@ -0,0 +1,19 @@ +{ + "cluster": [ + "manage_ilm", + "manage_index_templates", + "monitor" + ], + "indices": [ + { + "names": [ + ".monitoring-*-mb", + "metricbeat-*" + ], + "privileges": [ + "create_doc", + "manage" + ] + } + ] +}