diff --git a/.env.example b/.env.example
index 756b66a..28218ca 100644
--- a/.env.example
+++ b/.env.example
@@ -5,4 +5,15 @@ ELASTIC_PASSWORD=elastic_password
 KIBANA_PASSWORD=kibana_password
 
 # Set the cluster name
-CLUSTER_NAME=docker-cluster
\ No newline at end of file
+CLUSTER_NAME=docker-cluster
+
+# Elastic API key name
+NEXT_PUBLIC_ELASTIC_API_KEY_NAME=cm2d_api_key
+
+# AWS SES credentials
+AWS_ACCESS_KEY_ID=your_access_key_id_here
+AWS_SECRET_ACCESS_KEY=your_secret_access_key_here
+AWS_REGION=your_aws_region_here
+
+# The email address that sends the email
+EMAIL_SOURCE=your_email_source_here
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
new file mode 100644
index 0000000..e9c8bbe
--- /dev/null
+++ b/.github/workflows/build.yml
@@ -0,0 +1,103 @@
+name: CI
+
+on:
+  workflow_dispatch:
+  push:
+    branches:
+      - main
+
+jobs:
+  elk-check-files-changed:
+    runs-on: ubuntu-latest
+
+    outputs:
+      files-changed-elasticsearch: ${{ steps.set-output.outputs.files-changed-elasticsearch }}
+      files-changed-kibana: ${{ steps.set-output.outputs.files-changed-kibana }}
+
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: Get changed files in docker for elasticsearch
+        id: check-files-elasticsearch
+        uses: tj-actions/changed-files@v37
+        with:
+          files: docker/elasticsearch
+
+      - name: Get changed files in docker for kibana
+        id: check-files-kibana
+        uses: tj-actions/changed-files@v37
+        with:
+          files: docker/kibana
+
+      - name: Set output if files changed in docker for elasticsearch and kibana
+        id: set-output
+        run: |
+          echo "files-changed-elasticsearch=${{ steps.check-files-elasticsearch.outputs.any_changed }}" >> $GITHUB_OUTPUT
+          echo "files-changed-kibana=${{ steps.check-files-kibana.outputs.any_changed }}" >> $GITHUB_OUTPUT
+
+  elasticsearch-build:
+    needs: elk-check-files-changed
+    if: ${{ needs.elk-check-files-changed.outputs.files-changed-elasticsearch == 'true' }}
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+
+      - name: Login to GitHub Container Registry
+        uses: docker/login-action@v2
+        with:
+          registry: ghcr.io
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Build and push
+        uses: docker/build-push-action@v3
+        with:
+          context: "{{defaultContext}}:docker/elasticsearch"
+          push: true
+          tags: cm2d/cm2d-elasticsearch:${{ github.sha }},cm2d/cm2d-elasticsearch:latest
+
+  kibana-build:
+    needs: elk-check-files-changed
+    if: ${{ needs.elk-check-files-changed.outputs.files-changed-kibana == 'true' }}
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+
+      - name: Login to GitHub Container Registry
+        uses: docker/login-action@v2
+        with:
+          registry: ghcr.io
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Build and push
+        uses: docker/build-push-action@v3
+        with:
+          context: "{{defaultContext}}:docker/kibana"
+          push: true
+          tags: cm2d/cm2d-kibana:${{ github.sha }},cm2d/cm2d-kibana:latest
+
+  webapp-build:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+
+      - name: Login to GitHub Container Registry
+        uses: docker/login-action@v2
+        with:
+          registry: ghcr.io
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Build and push
+        uses: docker/build-push-action@v3
+        with:
+          context: "{{defaultContext}}:webapp-next"
+          push: true
+          tags: ghcr.io/socialgouv/cm2d-webapp:${{ github.sha }},ghcr.io/socialgouv/cm2d-webapp:latest
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
deleted file mode 100644
index a7208b3..0000000
--- a/.github/workflows/ci.yml
+++ /dev/null
@@ -1,29 +0,0 @@
-name: CI
-
-on:
-  push:
-    branches: ['main']
-  pull_request:
-    branches: ['main']
-
-jobs:
-  build:
-    runs-on: ubuntu-latest
-
-    strategy:
-      matrix:
-        node-version: [18.x]
-
-    steps:
-      - uses: actions/checkout@v3
-
-      - name: Use Node.js ${{ matrix.node-version }}
-        uses: actions/setup-node@v3
-        with:
-          node-version: ${{ matrix.node-version }}
-
-      - name: Build
-        run: |
-          cd webapp-next
-          yarn
-          yarn build
diff --git a/.gitignore b/.gitignore
index 9248b73..d2c4cc9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,4 +4,5 @@ faker/sample_data.csv
 faker/.idea
 certificates/*
 sample_data.csv
-.env
\ No newline at end of file
+.env
+node_modules/
diff --git a/README.md b/README.md
index 9c5c370..6a857e1 100644
--- a/README.md
+++ b/README.md
@@ -16,18 +16,17 @@ Pour démarrer la suite ELK localement :
 docker compose up -d
 ```
 
-Au premier run ELK, lancez cette suite de commande
+Pour initialiser les variables d'environnement NextJS
 
 ```
-mkdir certificates
-docker cp elasticsearch:/usr/share/elasticsearch/config/certs/ca/ca.crt ./certificates/ca.crt
+cd webapp-next
+cp .env.example .env
 ```
 
-Pour initialiser les variables d'environnement NextJS
+Au premier run ELK, lancez cette commande pour initialiser le mot de passe du user "kibana_system" (remplacer {ELASTIC_PASSWORD} et {KIBANA_PASSWORD} par les mots de passe de votre environnement) :
 
 ```
-cd webapp-next
-cp .env.example .env
+docker exec elasticsearch curl -s -X POST --cacert config/certs/ca/ca.crt -u "elastic:{ELASTIC_PASSWORD}" -H "Content-Type: application/json" https://elasticsearch:9200/_security/user/kibana_system/_password -d "{\"password\":\"{KIBANA_PASSWORD}\"}"
 ```
 
 Pour démarrer le frontend NextJS localement :
@@ -82,75 +81,14 @@ Il est maintenant possible de se connecter en utilisant l'adresse email et le mo
 
 ## Initialisation de l'environnement ELK
 
-### Indexation des certificats
-
-Création de l'index principal destiné à rassembler les informations relatives aux certificats.
-
-Rendez-vous dans "Management" > "Dev Tools" et lancez la requête suivante :
-
-```
-PUT /cm2d_certificate
-{
-  "mappings": {
-    "_meta": {
-      "created_by": "curl-user"
-    },
-    "properties": {
-      "@timestamp": {
-        "type": "date"
-      },
-      "age": {
-        "type": "long"
-      },
-      "categories_level_1": {
-        "type": "keyword"
-      },
-      "categories_level_2": {
-        "type": "keyword"
-      },
-      "coordinates": {
-        "type": "keyword"
-      },
-      "date": {
-        "type": "date",
-        "format": "iso8601"
-      },
-      "death_location": {
-        "type": "keyword"
-      },
-      "department": {
-        "type": "long"
-      },
-      "home_location": {
-        "type": "keyword"
-      },
-      "kind": {
-        "type": "keyword"
-      },
-      "sex": {
-        "type": "keyword"
-      }
-    }
-  }
-}'
-```
-
-### Index pour les attributs supplémentaires des utilisateurs
-
-Pour stocker des informations supplémentaires concernant les utilisateurs CM2D, nous devons créer un index dédié.
-
-Rendez-vous dans "Management" > "Dev Tools" et lancez la requête suivante :
-
-```
-PUT /cm2d_users
-{
-  "mappings": {
-    "properties": {
-      "username": { "type": "text" },
-      "versionCGU": { "type": "text" }
-    }
-  }
-}
+### Indexation des certificats et des utilisateurs
+
+```
+docker run --net=host --rm -ti -e NODE_TLS_REJECT_UNAUTHORIZED=0 -v ./default-indexes:/tmp --entrypoint multielasticdump elasticdump/elasticsearch-dump \
+  --direction=load \
+  --input=./tmp \
+  --output="https://elastic:${ELASTIC_PASSWORD}@localhost:9200" \
+  --tlsAuth
 ```
 
 ### Mise en place des transformations
@@ -241,3 +179,40 @@ Destination Index : `cm2d_departments`
 Continous mode
 Date field for continous mode : `@timestamp`
 Delay : `60s`
+
+
+
+## Docker production
+
+### Créer les images docker
+```
+docker build -t cm2d-elasticsearch docker/elasticsearch
+docker build -t cm2d-kibana docker/kibana
+docker build --build-arg NEXT_PUBLIC_ELASTIC_API_KEY_NAME=${NEXT_PUBLIC_ELASTIC_API_KEY_NAME} -t cm2d-webapp webapp-next
+```
+
+### Créer les réseaux docker
+```
+docker network create elastic
+docker network create webapp
+```
+
+### Elasticsearch
+```
+docker run -d -p 9200:9200 -p 9300:9300 --net elastic -v es_data:/usr/share/elasticsearch/data -v certs:/usr/share/elasticsearch/config/certs -e ELASTIC_PASSWORD=${ELASTIC_PASSWORD} --name elasticsearch cm2d-elasticsearch
+```
+
+### Attacher le réseau webapp à Elasticsearch
+```
+docker network connect webapp elasticsearch
+```
+
+### Kibana
+```
+docker run -d -p 5601:5601 --net elastic -v kibana_data:/usr/share/kibana/data -v certs:/usr/share/kibana/config/certs -e ELASTICSEARCH_PASSWORD=${KIBANA_PASSWORD} --name kibana cm2d-kibana
+```
+
+### Webapp
+```
+docker run -d -p 3000:3000 --net webapp -v certs:/app/certs --env-file ${path_fichier_environnement} --name webapp cm2d-webapp
+```
\ No newline at end of file
diff --git a/default-indexes/cm2d_certificate.mapping.json b/default-indexes/cm2d_certificate.mapping.json
new file mode 100644
index 0000000..487aa57
--- /dev/null
+++ b/default-indexes/cm2d_certificate.mapping.json
@@ -0,0 +1 @@
+{"cm2d_certificate":{"mappings":{"_meta":{"created_by":"file-data-visualizer"},"properties":{"@timestamp":{"type":"date"},"age":{"type":"long"},"categories_level_1":{"type":"keyword"},"categories_level_2":{"type":"keyword"},"coordinates":{"type":"keyword"},"date":{"type":"date","format":"iso8601"},"death_location":{"type":"keyword"},"department":{"type":"long"},"home_location":{"type":"keyword"},"kind":{"type":"keyword"},"sex":{"type":"keyword"}}}}}
diff --git a/default-indexes/cm2d_certificate.settings.json b/default-indexes/cm2d_certificate.settings.json
new file mode 100644
index 0000000..8a86362
--- /dev/null
+++ b/default-indexes/cm2d_certificate.settings.json
@@ -0,0 +1 @@
+"{\"cm2d_certificate\":{\"settings\":{\"index\":{\"routing\":{\"allocation\":{\"include\":{\"_tier_preference\":\"data_content\"}}},\"number_of_shards\":\"1\",\"number_of_replicas\":\"1\"}}}}"
diff --git a/default-indexes/cm2d_certificate.template.json b/default-indexes/cm2d_certificate.template.json
new file mode 100644
index 0000000..71829ad
--- /dev/null
+++ b/default-indexes/cm2d_certificate.template.json
@@ -0,0 +1 @@
+"{}"
diff --git a/default-indexes/cm2d_users.mapping.json b/default-indexes/cm2d_users.mapping.json
new file mode 100644
index 0000000..827a8f5
--- /dev/null
+++ b/default-indexes/cm2d_users.mapping.json
@@ -0,0 +1 @@
+{"cm2d_users":{"mappings":{"properties":{"username":{"type":"text"},"versionCGU":{"type":"text"}}}}}
diff --git
a/default-indexes/cm2d_users.settings.json b/default-indexes/cm2d_users.settings.json new file mode 100644 index 0000000..bbb2dd4 --- /dev/null +++ b/default-indexes/cm2d_users.settings.json @@ -0,0 +1 @@ +"{\"cm2d_users\":{\"settings\":{\"index\":{\"routing\":{\"allocation\":{\"include\":{\"_tier_preference\":\"data_content\"}}},\"number_of_shards\":\"1\",\"number_of_replicas\":\"1\"}}}}" diff --git a/default-indexes/cm2d_users.template.json b/default-indexes/cm2d_users.template.json new file mode 100644 index 0000000..71829ad --- /dev/null +++ b/default-indexes/cm2d_users.template.json @@ -0,0 +1 @@ +"{}" diff --git a/docker-compose.yaml b/docker-compose.yaml index bc211e6..240eac3 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -1,126 +1,65 @@ version: '3.3' services: - setup: - image: docker.elastic.co/elasticsearch/elasticsearch:8.7.1 - container_name: setup - volumes: - - certs:/usr/share/elasticsearch/config/certs - user: '0' - networks: - - elastic - command: > - bash -c ' - if [ x${ELASTIC_PASSWORD} == x ]; then - echo "Set the ELASTIC_PASSWORD environment variable in the .env file"; - exit 1; - elif [ x${KIBANA_PASSWORD} == x ]; then - echo "Set the KIBANA_PASSWORD environment variable in the .env file"; - exit 1; - fi; - if [ ! -f config/certs/ca.zip ]; then - echo "Creating CA"; - bin/elasticsearch-certutil ca --silent --pem -out config/certs/ca.zip; - unzip config/certs/ca.zip -d config/certs; - fi; - if [ ! -f config/certs/certs.zip ]; then - echo "Creating certs"; - echo -ne \ - "instances:\n"\ - " - name: elasticsearch\n"\ - " dns:\n"\ - " - elasticsearch\n"\ - " - localhost\n"\ - " ip:\n"\ - " - 127.0.0.1\n"\ - " - 0.0.0.0\n"\ - > config/certs/instances.yml; - bin/elasticsearch-certutil cert --silent --pem -out config/certs/certs.zip --in config/certs/instances.yml --ca-cert config/certs/ca/ca.crt --ca-key config/certs/ca/ca.key; - unzip config/certs/certs.zip -d config/certs; - fi; - echo "Setting file permissions" - chown -R root:root config/certs; - find . -type d -exec chmod 750 \{\} \;; - find . 
-type f -exec chmod 640 \{\} \;; - echo "Waiting for Elasticsearch availability"; - until curl -s --cacert config/certs/ca/ca.crt https://elasticsearch:9200 | grep -q "missing authentication credentials"; do sleep 30; done; - echo "Setting kibana_system password"; - until curl -s -X POST --cacert config/certs/ca/ca.crt -u "elastic:${ELASTIC_PASSWORD}" -H "Content-Type: application/json" https://elasticsearch:9200/_security/user/kibana_system/_password -d "{\"password\":\"${KIBANA_PASSWORD}\"}" | grep -q "^{}"; do sleep 10; done; - echo "All done!"; - ' - healthcheck: - test: ['CMD-SHELL', '[ -f config/certs/elasticsearch/elasticsearch.crt ]'] - interval: 1s - timeout: 5s - retries: 120 - elasticsearch: - depends_on: - setup: - condition: service_healthy - image: docker.elastic.co/elasticsearch/elasticsearch:8.7.1 + build: docker/elasticsearch container_name: elasticsearch ports: - 9200:9200 - 9300:9300 volumes: - - certs:/usr/share/elasticsearch/config/certs - es_data:/usr/share/elasticsearch/data + - certs:/usr/share/elasticsearch/config/certs networks: - elastic + - webapp environment: - - node.name=elasticsearch - ELASTIC_PASSWORD=${ELASTIC_PASSWORD} - - cluster.name=${CLUSTER_NAME} - - bootstrap.memory_lock=true - - discovery.type=single-node - - xpack.security.enabled=true - - xpack.security.http.ssl.enabled=true - - xpack.security.http.ssl.key=certs/elasticsearch/elasticsearch.key - - xpack.security.http.ssl.certificate=certs/elasticsearch/elasticsearch.crt - - xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt - - xpack.security.transport.ssl.enabled=true - - xpack.security.transport.ssl.key=certs/elasticsearch/elasticsearch.key - - xpack.security.transport.ssl.certificate=certs/elasticsearch/elasticsearch.crt - - xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt - - xpack.security.transport.ssl.verification_mode=certificate - - xpack.license.self_generated.type=trial ulimits: memlock: soft: -1 hard: -1 - healthcheck: - test: - [ - 'CMD-SHELL', - "curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials'" - ] - interval: 10s - timeout: 10s - retries: 120 deploy: resources: limits: memory: 4G - + kibana: - image: docker.elastic.co/kibana/kibana:8.7.1 + build: docker/kibana container_name: kibana ports: - 5601:5601 - depends_on: - elasticsearch: - condition: service_healthy + depends_on: + - elasticsearch volumes: - - certs:/usr/share/kibana/config/certs - kibana_data:/usr/share/kibana/data + - certs:/usr/share/kibana/config/certs networks: - elastic environment: - - SERVERNAME=kibana - - ELASTICSEARCH_HOSTS=https://elasticsearch:9200 - - ELASTICSEARCH_USERNAME=kibana_system - ELASTICSEARCH_PASSWORD=${KIBANA_PASSWORD} - - ELASTICSEARCH_SSL_CERTIFICATEAUTHORITIES=config/certs/ca/ca.crt + + webapp: + build: + context: webapp-next + args: + - NEXT_PUBLIC_ELASTIC_API_KEY_NAME=${NEXT_PUBLIC_ELASTIC_API_KEY_NAME} + container_name: webapp + depends_on: + - elasticsearch + ports: + - "3000:3000" + volumes: + - certs:/app/certs + networks: + - webapp + environment: + - ELASTIC_HOST=${ELASTIC_HOST} + - ELASTIC_PASSWORD=${ELASTIC_PASSWORD} + - AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} + - AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} + - AWS_REGION=${AWS_REGION} + - EMAIL_SOURCE=${EMAIL_SOURCE} + volumes: certs: driver: local @@ -132,3 +71,5 @@ volumes: networks: elastic: driver: bridge + webapp: + driver: bridge diff --git a/docker/elasticsearch/Dockerfile b/docker/elasticsearch/Dockerfile new file mode 100644 
index 0000000..486d246 --- /dev/null +++ b/docker/elasticsearch/Dockerfile @@ -0,0 +1,163 @@ +################################################################################ +# Build stage 1 `builder`: +# Extract Elasticsearch artifact +################################################################################ + +FROM ubuntu:20.04 AS builder + +# Install required packages to extract the Elasticsearch distribution + +RUN for iter in {1..10}; do \ + apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y curl && \ + exit_code=0 && break || \ + exit_code=$? && echo "apt-get error: retry $iter in 10s" && sleep 10; \ + done; \ + exit $exit_code + +# `tini` is a tiny but valid init for containers. This is used to cleanly +# control how ES and any child processes are shut down. +# +# The tini GitHub page gives instructions for verifying the binary using +# gpg, but the keyservers are slow to return the key and this can fail the +# build. Instead, we check the binary against the published checksum. +RUN set -eux ; \ + tini_bin="" ; \ + case "$(arch)" in \ + aarch64) tini_bin='tini-arm64' ;; \ + x86_64) tini_bin='tini-amd64' ;; \ + *) echo >&2 ; echo >&2 "Unsupported architecture $(arch)" ; echo >&2 ; exit 1 ;; \ + esac ; \ + curl --retry 10 -S -L -O https://github.com/krallin/tini/releases/download/v0.19.0/${tini_bin} ; \ + curl --retry 10 -S -L -O https://github.com/krallin/tini/releases/download/v0.19.0/${tini_bin}.sha256sum ; \ + sha256sum -c ${tini_bin}.sha256sum ; \ + rm ${tini_bin}.sha256sum ; \ + mv ${tini_bin} /bin/tini ; \ + chmod 0555 /bin/tini + +RUN mkdir /usr/share/elasticsearch +WORKDIR /usr/share/elasticsearch + +RUN curl --retry 10 -S -L --output /tmp/elasticsearch.tar.gz https://artifacts-no-kpi.elastic.co/downloads/elasticsearch/elasticsearch-8.7.1-linux-$(arch).tar.gz + +RUN tar -zxf /tmp/elasticsearch.tar.gz --strip-components=1 + +# The distribution includes a `config` directory, no need to create it +COPY config/elasticsearch.yml config/ +COPY config/instances.yml config/certs/instances.yml +COPY config/log4j2.properties config/log4j2.docker.properties + +# 1. Configure the distribution for Docker +# 2. Create required directory +# 3. Move the distribution's default logging config aside +# 4. Move the generated docker logging config so that it is the default +# 5. Reset permissions on all directories +# 6. Reset permissions on all files +# 7. Make CLI tools executable +# 8. Make some directories writable. `bin` must be writable because +# plugins can install their own CLI utilities. +# 9. Make some files writable +RUN sed -i -e 's/ES_DISTRIBUTION_TYPE=tar/ES_DISTRIBUTION_TYPE=docker/' bin/elasticsearch-env && \ + mkdir data && \ + mv config/log4j2.properties config/log4j2.file.properties && \ + mv config/log4j2.docker.properties config/log4j2.properties && \ + find . -type d -exec chmod 0555 {} + && \ + find . -type f -exec chmod 0444 {} + && \ + chmod 0555 bin/* jdk/bin/* jdk/lib/jspawnhelper modules/x-pack-ml/platform/linux-*/bin/* && \ + chmod 0775 bin config config/jvm.options.d config/certs data logs plugins && \ + find config -type f -exec chmod 0664 {} + + +################################################################################ +# Build stage 2 (the actual Elasticsearch image): +# +# Copy elasticsearch from stage 1 +# Add entrypoint +################################################################################ + +FROM ubuntu:20.04 + +# Change default shell to bash, then install required packages with retries. 
+RUN yes no | dpkg-reconfigure dash && \ + for iter in {1..10}; do \ + export DEBIAN_FRONTEND=noninteractive && \ + apt-get update && \ + apt-get upgrade -y && \ + apt-get install -y --no-install-recommends \ + ca-certificates curl netcat p11-kit unzip zip && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* && \ + exit_code=0 && break || \ + exit_code=$? && echo "apt-get error: retry $iter in 10s" && sleep 10; \ + done; \ + exit $exit_code + +RUN groupadd -g 1000 elasticsearch && \ + adduser --uid 1000 --gid 1000 --home /usr/share/elasticsearch elasticsearch && \ + adduser elasticsearch root && \ + chown -R 0:0 /usr/share/elasticsearch + +ENV ELASTIC_CONTAINER true + +WORKDIR /usr/share/elasticsearch + +COPY --from=builder --chown=0:0 /usr/share/elasticsearch /usr/share/elasticsearch +COPY --from=builder --chown=0:0 /bin/tini /bin/tini + +ENV PATH /usr/share/elasticsearch/bin:$PATH + +COPY bin/docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh + +# 1. Sync the user and group permissions of /etc/passwd +# 2. Set correct permissions of the entrypoint +# 3. Ensure that there are no files with setuid or setgid, in order to mitigate "stackclash" attacks. +# We've already run this in previous layers so it ought to be a no-op. +# 4. Replace OpenJDK's built-in CA certificate keystore with the one from the OS +# vendor. The latter is superior in several ways. +# REF: https://github.com/elastic/elasticsearch-docker/issues/171 +# 5. Tighten up permissions on the ES home dir (the permissions of the contents are handled earlier) +# 6. You can't install plugins that include configuration when running as `elasticsearch` and the `config` +# dir is owned by `root`, because the installed tries to manipulate the permissions on the plugin's +# config directory. 
+RUN chmod g=u /etc/passwd && \ + chmod 0555 /usr/local/bin/docker-entrypoint.sh && \ + find / -xdev -perm -4000 -exec chmod ug-s {} + && \ + chmod 0775 /usr/share/elasticsearch && \ + chown elasticsearch bin config config/jvm.options.d data logs plugins config/certs/ + +# Update "cacerts" bundle to use Ubuntu's CA certificates (and make sure it +# stays up-to-date with changes to Ubuntu's store) +COPY bin/docker-openjdk /etc/ca-certificates/update.d/docker-openjdk +RUN /etc/ca-certificates/update.d/docker-openjdk + +EXPOSE 9200 9300 + +LABEL org.label-schema.build-date="2023-04-27T04:33:42.127815583Z" \ + org.label-schema.license="Elastic-License-2.0" \ + org.label-schema.name="Elasticsearch" \ + org.label-schema.schema-version="1.0" \ + org.label-schema.url="https://www.elastic.co/products/elasticsearch" \ + org.label-schema.usage="https://www.elastic.co/guide/en/elasticsearch/reference/index.html" \ + org.label-schema.vcs-ref="f229ed3f893a515d590d0f39b05f68913e2d9b53" \ + org.label-schema.vcs-url="https://github.com/elastic/elasticsearch" \ + org.label-schema.vendor="Elastic" \ + org.label-schema.version="8.7.1" \ + org.opencontainers.image.created="2023-04-27T04:33:42.127815583Z" \ + org.opencontainers.image.documentation="https://www.elastic.co/guide/en/elasticsearch/reference/index.html" \ + org.opencontainers.image.licenses="Elastic-License-2.0" \ + org.opencontainers.image.revision="f229ed3f893a515d590d0f39b05f68913e2d9b53" \ + org.opencontainers.image.source="https://github.com/elastic/elasticsearch" \ + org.opencontainers.image.title="Elasticsearch" \ + org.opencontainers.image.url="https://www.elastic.co/products/elasticsearch" \ + org.opencontainers.image.vendor="Elastic" \ + org.opencontainers.image.version="8.7.1" + +# Our actual entrypoint is `tini`, a minimal but functional init program. It +# calls the entrypoint we provide, while correctly forwarding signals. +ENTRYPOINT ["/bin/tini", "--"] + +CMD ["/usr/local/bin/docker-entrypoint.sh"] + +USER elasticsearch:root + +################################################################################ +# End of multi-stage Dockerfile +################################################################################ \ No newline at end of file diff --git a/docker/elasticsearch/bin/docker-entrypoint.sh b/docker/elasticsearch/bin/docker-entrypoint.sh new file mode 100755 index 0000000..18997c8 --- /dev/null +++ b/docker/elasticsearch/bin/docker-entrypoint.sh @@ -0,0 +1,99 @@ +#!/bin/bash +set -e + +# Files created by Elasticsearch should always be group writable too +umask 0002 + +# Allow user specify custom CMD, maybe bin/elasticsearch itself +# for example to directly specify `-E` style parameters for elasticsearch on k8s +# or simply to run /bin/bash to check the image +if [[ "$1" == "/usr/local/bin/docker-entrypoint.sh" || $(basename "$1") == "elasticsearch" ]]; then + # Rewrite CMD args to remove the explicit command, + # so that we are backwards compatible with the docs + # from the previous Elasticsearch versions < 6 + # and configuration option: + # https://www.elastic.co/guide/en/elasticsearch/reference/5.6/docker.html#_d_override_the_image_8217_s_default_ulink_url_https_docs_docker_com_engine_reference_run_cmd_default_command_or_options_cmd_ulink + # Without this, user could specify `elasticsearch -E x.y=z` but + # `bin/elasticsearch -E x.y=z` would not work. In any case, + # we want to continue through this script, and not exec early. 
+ set -- "${@:2}" +else + # Run whatever command the user wanted + exec "$@" +fi + +# Allow environment variables to be set by creating a file with the +# contents, and setting an environment variable with the suffix _FILE to +# point to it. This can be used to provide secrets to a container, without +# the values being specified explicitly when running the container. +# +# This is also sourced in elasticsearch-env, and is only needed here +# as well because we use ELASTIC_PASSWORD below. Sourcing this script +# is idempotent. +source /usr/share/elasticsearch/bin/elasticsearch-env-from-file + +if [ ! -f config/certs/ca.zip ]; then + echo "Creating CA"; + bin/elasticsearch-certutil ca --silent --pem -out config/certs/ca.zip; + unzip config/certs/ca.zip -d config/certs; +fi; + +if [ ! -f config/certs/certs.zip ]; then + echo "Creating certs"; + bin/elasticsearch-certutil cert --silent --pem -out config/certs/certs.zip --in config/certs/instances.yml --ca-cert config/certs/ca/ca.crt --ca-key config/certs/ca/ca.key; + unzip config/certs/certs.zip -d config/certs; +fi; + +if [[ -f bin/elasticsearch-users ]]; then + # Check for the ELASTIC_PASSWORD environment variable to set the + # bootstrap password for Security. + # + # This is only required for the first node in a cluster with Security + # enabled, but we have no way of knowing which node we are yet. We'll just + # honor the variable if it's present. + + if [[ -n "$ELASTIC_PASSWORD" ]]; then + [[ -f /usr/share/elasticsearch/config/elasticsearch.keystore ]] || (elasticsearch-keystore create) + if ! (elasticsearch-keystore has-passwd --silent) ; then + # keystore is unencrypted + if ! (elasticsearch-keystore list | grep -q '^bootstrap.password$'); then + (echo "$ELASTIC_PASSWORD" | elasticsearch-keystore add -x 'bootstrap.password') + # setup password for built-in user kibana_system + + fi + else + # keystore requires password + if ! (echo "$KEYSTORE_PASSWORD" \ + | elasticsearch-keystore list | grep -q '^bootstrap.password$') ; then + COMMANDS="$(printf "%s\n%s" "$KEYSTORE_PASSWORD" "$ELASTIC_PASSWORD")" + (echo "$COMMANDS" | elasticsearch-keystore add -x 'bootstrap.password') + fi + fi + fi +fi + +if [[ -n "$ES_LOG_STYLE" ]]; then + case "$ES_LOG_STYLE" in + console) + # This is the default. Nothing to do. + ;; + file) + # Overwrite the default config with the stack config. Do this as a + # copy, not a move, in case the container is restarted. + cp -f /usr/share/elasticsearch/config/log4j2.file.properties /usr/share/elasticsearch/config/log4j2.properties + ;; + *) + echo "ERROR: ES_LOG_STYLE set to [$ES_LOG_STYLE]. 
Expected [console] or [file]" >&2 + exit 1 ;; + esac +fi + +if [[ -n "$ENROLLMENT_TOKEN" ]]; then + POSITIONAL_PARAMETERS="--enrollment-token $ENROLLMENT_TOKEN" +else + POSITIONAL_PARAMETERS="" +fi + +# Signal forwarding and child reaping is handled by `tini`, which is the +# actual entrypoint of the container +exec /usr/share/elasticsearch/bin/elasticsearch "$@" $POSITIONAL_PARAMETERS <<<"$KEYSTORE_PASSWORD" \ No newline at end of file diff --git a/docker/elasticsearch/bin/docker-openjdk b/docker/elasticsearch/bin/docker-openjdk new file mode 100755 index 0000000..be41357 --- /dev/null +++ b/docker/elasticsearch/bin/docker-openjdk @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +set -Eeuo pipefail + +# Update "cacerts" bundle to use Ubuntu's CA certificates (and make sure it +# stays up-to-date with changes to Ubuntu's store) + +trust extract \ + --overwrite \ + --format=java-cacerts \ + --filter=ca-anchors \ + --purpose=server-auth \ + /usr/share/elasticsearch/jdk/lib/security/cacerts \ No newline at end of file diff --git a/docker/elasticsearch/config/elasticsearch.yml b/docker/elasticsearch/config/elasticsearch.yml new file mode 100644 index 0000000..d63694d --- /dev/null +++ b/docker/elasticsearch/config/elasticsearch.yml @@ -0,0 +1,19 @@ +node.name: elasticsearch +cluster.name: "docker-cluster" +network.host: 0.0.0.0 + +bootstrap.memory_lock: true + +discovery.type: single-node + +xpack.security.enabled: true +xpack.license.self_generated.type: trial +xpack.security.http.ssl.enabled: true +xpack.security.http.ssl.key: certs/elasticsearch/elasticsearch.key +xpack.security.http.ssl.certificate: certs/elasticsearch/elasticsearch.crt +xpack.security.http.ssl.certificate_authorities: certs/ca/ca.crt +xpack.security.transport.ssl.enabled: true +xpack.security.transport.ssl.key: certs/elasticsearch/elasticsearch.key +xpack.security.transport.ssl.certificate: certs/elasticsearch/elasticsearch.crt +xpack.security.transport.ssl.certificate_authorities: certs/ca/ca.crt +xpack.security.transport.ssl.verification_mode: certificate \ No newline at end of file diff --git a/docker/elasticsearch/config/instances.yml b/docker/elasticsearch/config/instances.yml new file mode 100644 index 0000000..8548bea --- /dev/null +++ b/docker/elasticsearch/config/instances.yml @@ -0,0 +1,7 @@ +instances: + - name: elasticsearch + dns: + - elasticsearch + ip: + - 127.0.0.1 + - 0.0.0.0 \ No newline at end of file diff --git a/docker/elasticsearch/config/log4j2.properties b/docker/elasticsearch/config/log4j2.properties new file mode 100644 index 0000000..8c5a5f6 --- /dev/null +++ b/docker/elasticsearch/config/log4j2.properties @@ -0,0 +1,152 @@ +status = error + +######## Server JSON ############################ +appender.rolling.type = Console +appender.rolling.name = rolling +appender.rolling.layout.type = ECSJsonLayout +appender.rolling.layout.dataset = elasticsearch.server + +################################################ + +################################################ + +rootLogger.level = info +rootLogger.appenderRef.rolling.ref = rolling + +######## Deprecation JSON ####################### +appender.deprecation_rolling.type = Console +appender.deprecation_rolling.name = deprecation_rolling +appender.deprecation_rolling.layout.type = ECSJsonLayout +# Intentionally follows a different pattern to above +appender.deprecation_rolling.layout.dataset = deprecation.elasticsearch +appender.deprecation_rolling.filter.rate_limit.type = RateLimitingFilter + +appender.header_warning.type = HeaderWarningAppender 
+appender.header_warning.name = header_warning +################################################# + +logger.deprecation.name = org.elasticsearch.deprecation +logger.deprecation.level = WARN +logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling +logger.deprecation.appenderRef.header_warning.ref = header_warning +logger.deprecation.additivity = false + +######## Search slowlog JSON #################### +appender.index_search_slowlog_rolling.type = Console +appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling +appender.index_search_slowlog_rolling.layout.type = ECSJsonLayout +appender.index_search_slowlog_rolling.layout.dataset = elasticsearch.index_search_slowlog + +################################################# + +################################################# +logger.index_search_slowlog_rolling.name = index.search.slowlog +logger.index_search_slowlog_rolling.level = trace +logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling +logger.index_search_slowlog_rolling.additivity = false + +######## Indexing slowlog JSON ################## +appender.index_indexing_slowlog_rolling.type = Console +appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling +appender.index_indexing_slowlog_rolling.layout.type = ECSJsonLayout +appender.index_indexing_slowlog_rolling.layout.dataset = elasticsearch.index_indexing_slowlog + +################################################# + +logger.index_indexing_slowlog.name = index.indexing.slowlog.index +logger.index_indexing_slowlog.level = trace +logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling +logger.index_indexing_slowlog.additivity = false + +logger.org_apache_pdfbox.name = org.apache.pdfbox +logger.org_apache_pdfbox.level = off + +logger.org_apache_poi.name = org.apache.poi +logger.org_apache_poi.level = off + +logger.org_apache_fontbox.name = org.apache.fontbox +logger.org_apache_fontbox.level = off + +logger.org_apache_xmlbeans.name = org.apache.xmlbeans +logger.org_apache_xmlbeans.level = off + +logger.com_amazonaws.name = com.amazonaws +logger.com_amazonaws.level = warn + +logger.com_amazonaws_jmx_SdkMBeanRegistrySupport.name = com.amazonaws.jmx.SdkMBeanRegistrySupport +logger.com_amazonaws_jmx_SdkMBeanRegistrySupport.level = error + +logger.com_amazonaws_metrics_AwsSdkMetrics.name = com.amazonaws.metrics.AwsSdkMetrics +logger.com_amazonaws_metrics_AwsSdkMetrics.level = error + +logger.com_amazonaws_auth_profile_internal_BasicProfileConfigFileLoader.name = com.amazonaws.auth.profile.internal.BasicProfileConfigFileLoader +logger.com_amazonaws_auth_profile_internal_BasicProfileConfigFileLoader.level = error + +logger.com_amazonaws_services_s3_internal_UseArnRegionResolver.name = com.amazonaws.services.s3.internal.UseArnRegionResolver +logger.com_amazonaws_services_s3_internal_UseArnRegionResolver.level = error + +appender.audit_rolling.type = Console +appender.audit_rolling.name = audit_rolling +appender.audit_rolling.layout.type = PatternLayout +appender.audit_rolling.layout.pattern = {\ + "type":"audit", \ + "timestamp":"%d{yyyy-MM-dd'T'HH:mm:ss,SSSZ}"\ + %varsNotEmpty{, "cluster.name":"%enc{%map{cluster.name}}{JSON}"}\ + %varsNotEmpty{, "cluster.uuid":"%enc{%map{cluster.uuid}}{JSON}"}\ + %varsNotEmpty{, "node.name":"%enc{%map{node.name}}{JSON}"}\ # "node.name" node name from the `elasticsearch.yml` settings + %varsNotEmpty{, "node.id":"%enc{%map{node.id}}{JSON}"}\ # 
"node.id" node id which should not change between cluster restarts + %varsNotEmpty{, "host.name":"%enc{%map{host.name}}{JSON}"}\ # "host.name" unresolved hostname of the local node + %varsNotEmpty{, "host.ip":"%enc{%map{host.ip}}{JSON}"}\ # "host.ip" the local bound ip (i.e. the ip listening for connections) + %varsNotEmpty{, "event.type":"%enc{%map{event.type}}{JSON}"}\ # "event.type" informs about what internal system generated the event; possible values are "rest", "transport", "ip_filter" and "security_config_change" + %varsNotEmpty{, "event.action":"%enc{%map{event.action}}{JSON}"}\ # "event.action" the name of the audited event, eg. "authentication_failed", "access_granted", "run_as_granted", etc. + %varsNotEmpty{, "authentication.type":"%enc{%map{authentication.type}}{JSON}"}\ # "authentication.type" one of "realm", "api_key", "token", "anonymous" or "internal" + %varsNotEmpty{, "user.name":"%enc{%map{user.name}}{JSON}"}\ # "user.name" the subject name as authenticated by a realm + %varsNotEmpty{, "user.run_by.name":"%enc{%map{user.run_by.name}}{JSON}"}\ # "user.run_by.name" the original authenticated subject name that is impersonating another one. + %varsNotEmpty{, "user.run_as.name":"%enc{%map{user.run_as.name}}{JSON}"}\ # "user.run_as.name" if this "event.action" is of a run_as type, this is the subject name to be impersonated as. + %varsNotEmpty{, "user.realm":"%enc{%map{user.realm}}{JSON}"}\ # "user.realm" the name of the realm that authenticated "user.name" + %varsNotEmpty{, "user.realm_domain":"%enc{%map{user.realm_domain}}{JSON}"}\ # "user.realm_domain" if "user.realm" is under a domain, this is the name of the domain + %varsNotEmpty{, "user.run_by.realm":"%enc{%map{user.run_by.realm}}{JSON}"}\ # "user.run_by.realm" the realm name of the impersonating subject ("user.run_by.name") + %varsNotEmpty{, "user.run_by.realm_domain":"%enc{%map{user.run_by.realm_domain}}{JSON}"}\ # "user.run_by.realm_domain" if "user.run_by.realm" is under a domain, this is the name of the domain + %varsNotEmpty{, "user.run_as.realm":"%enc{%map{user.run_as.realm}}{JSON}"}\ # "user.run_as.realm" if this "event.action" is of a run_as type, this is the realm name the impersonated user is looked up from + %varsNotEmpty{, "user.run_as.realm_domain":"%enc{%map{user.run_as.realm_domain}}{JSON}"}\ # "user.run_as.realm_domain" if "user.run_as.realm" is under a domain, this is the name of the domain + %varsNotEmpty{, "user.roles":%map{user.roles}}\ # "user.roles" the roles array of the user; these are the roles that are granting privileges + %varsNotEmpty{, "apikey.id":"%enc{%map{apikey.id}}{JSON}"}\ # "apikey.id" this field is present if and only if the "authentication.type" is "api_key" + %varsNotEmpty{, "apikey.name":"%enc{%map{apikey.name}}{JSON}"}\ # "apikey.name" this field is present if and only if the "authentication.type" is "api_key" + %varsNotEmpty{, "authentication.token.name":"%enc{%map{authentication.token.name}}{JSON}"}\ # "authentication.token.name" this field is present if and only if the authenticating credential is a service account token + %varsNotEmpty{, "authentication.token.type":"%enc{%map{authentication.token.type}}{JSON}"}\ # "authentication.token.type" this field is present if and only if the authenticating credential is a service account token + %varsNotEmpty{, "origin.type":"%enc{%map{origin.type}}{JSON}"}\ # "origin.type" a received REST request is translated into one or more transport requests. 
This indicates which processing layer generated the event "rest" or "transport" (internal) + %varsNotEmpty{, "origin.address":"%enc{%map{origin.address}}{JSON}"}\ # "origin.address" the remote address and port of the first network hop, i.e. a REST proxy or another cluster node + %varsNotEmpty{, "realm":"%enc{%map{realm}}{JSON}"}\ # "realm" name of a realm that has generated an "authentication_failed" or an "authentication_successful"; the subject is not yet authenticated + %varsNotEmpty{, "realm_domain":"%enc{%map{realm_domain}}{JSON}"}\ # "realm_domain" if "realm" is under a domain, this is the name of the domain + %varsNotEmpty{, "url.path":"%enc{%map{url.path}}{JSON}"}\ # "url.path" the URI component between the port and the query string; it is percent (URL) encoded + %varsNotEmpty{, "url.query":"%enc{%map{url.query}}{JSON}"}\ # "url.query" the URI component after the path and before the fragment; it is percent (URL) encoded + %varsNotEmpty{, "request.method":"%enc{%map{request.method}}{JSON}"}\ # "request.method" the method of the HTTP request, i.e. one of GET, POST, PUT, DELETE, OPTIONS, HEAD, PATCH, TRACE, CONNECT + %varsNotEmpty{, "request.body":"%enc{%map{request.body}}{JSON}"}\ # "request.body" the content of the request body entity, JSON escaped + %varsNotEmpty{, "request.id":"%enc{%map{request.id}}{JSON}"}\ # "request.id" a synthetic identifier for the incoming request, this is unique per incoming request, and consistent across all audit events generated by that request + %varsNotEmpty{, "action":"%enc{%map{action}}{JSON}"}\ # "action" an action is the most granular operation that is authorized and this identifies it in a namespaced way (internal) + %varsNotEmpty{, "request.name":"%enc{%map{request.name}}{JSON}"}\ # "request.name" if the event is in connection to a transport message this is the name of the request class, similar to how rest requests are identified by the url path (internal) + %varsNotEmpty{, "indices":%map{indices}}\ # "indices" the array of indices that the "action" is acting upon + %varsNotEmpty{, "opaque_id":"%enc{%map{opaque_id}}{JSON}"}\ # "opaque_id" opaque value conveyed by the "X-Opaque-Id" request header + %varsNotEmpty{, "trace.id":"%enc{%map{trace.id}}{JSON}"}\ # "trace_id" an identifier conveyed by the part of "traceparent" request header + %varsNotEmpty{, "x_forwarded_for":"%enc{%map{x_forwarded_for}}{JSON}"}\ # "x_forwarded_for" the addresses from the "X-Forwarded-For" request header, as a verbatim string value (not an array) + %varsNotEmpty{, "transport.profile":"%enc{%map{transport.profile}}{JSON}"}\ # "transport.profile" name of the transport profile in case this is a "connection_granted" or "connection_denied" event + %varsNotEmpty{, "rule":"%enc{%map{rule}}{JSON}"}\ # "rule" name of the applied rule if the "origin.type" is "ip_filter" + %varsNotEmpty{, "put":%map{put}}\ # the "put", "delete", "change", "create", "invalidate" fields are only present when the "event.type" is "security_config_change" and contain the security config change (as an object) taking effect + %varsNotEmpty{, "delete":%map{delete}}\ + %varsNotEmpty{, "change":%map{change}}\ + %varsNotEmpty{, "create":%map{create}}\ + %varsNotEmpty{, "invalidate":%map{invalidate}}\ + }%n + + +logger.xpack_security_audit_logfile.name = org.elasticsearch.xpack.security.audit.logfile.LoggingAuditTrail +logger.xpack_security_audit_logfile.level = info +logger.xpack_security_audit_logfile.appenderRef.audit_rolling.ref = audit_rolling +logger.xpack_security_audit_logfile.additivity = false + 
+logger.xmlsig.name = org.apache.xml.security.signature.XMLSignature +logger.xmlsig.level = error +logger.samlxml_decrypt.name = org.opensaml.xmlsec.encryption.support.Decrypter +logger.samlxml_decrypt.level = fatal +logger.saml2_decrypt.name = org.opensaml.saml.saml2.encryption.Decrypter +logger.saml2_decrypt.level = fatal \ No newline at end of file diff --git a/docker/kibana/Dockerfile b/docker/kibana/Dockerfile new file mode 100644 index 0000000..564fe19 --- /dev/null +++ b/docker/kibana/Dockerfile @@ -0,0 +1,137 @@ +################################################################################ +# Build stage 0 `builder`: +# Extract Kibana artifact +################################################################################ +FROM ubuntu:20.04 AS builder + + +RUN for iter in {1..10}; do \ + apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y curl && \ + exit_code=0 && break || \ + exit_code=$? && echo "apt-get error: retry $iter in 10s" && sleep 10; \ + done; \ + exit $exit_code + +RUN for iter in {1..10}; do \ + export DEBIAN_FRONTEND=noninteractive && \ + apt-get update && \ + apt-get upgrade -y && \ + apt-get install -y --no-install-recommends \ + fontconfig fonts-liberation libnss3 libfontconfig1 ca-certificates curl && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* && exit_code=0 && break || exit_code=$? && echo "apt-get error: retry $iter in 10s" && \ + sleep 10; \ + done; \ + (exit $exit_code) + +# Add an init process, check the checksum to make sure it's a match +RUN set -e ; \ + TINI_BIN="" ; \ + case "$(arch)" in \ + aarch64) \ + TINI_BIN='tini-arm64' ; \ + ;; \ + x86_64) \ + TINI_BIN='tini-amd64' ; \ + ;; \ + *) echo >&2 "Unsupported architecture $(arch)" ; exit 1 ;; \ + esac ; \ + TINI_VERSION='v0.19.0' ; \ + curl --retry 8 -S -L -O "https://github.com/krallin/tini/releases/download/${TINI_VERSION}/${TINI_BIN}" ; \ + curl --retry 8 -S -L -O "https://github.com/krallin/tini/releases/download/${TINI_VERSION}/${TINI_BIN}.sha256sum" ; \ + sha256sum -c "${TINI_BIN}.sha256sum" ; \ + rm "${TINI_BIN}.sha256sum" ; \ + mv "${TINI_BIN}" /bin/tini ; \ + chmod +x /bin/tini + +RUN cd /tmp && \ + curl --retry 8 -s -L \ + --output kibana.tar.gz \ + https://artifacts.elastic.co/downloads/kibana/kibana-8.7.1-linux-$(arch).tar.gz && \ + cd - + + +RUN mkdir /usr/share/kibana +WORKDIR /usr/share/kibana +RUN tar --strip-components=1 -zxf /tmp/kibana.tar.gz +# Ensure that group permissions are the same as user permissions. +# This will help when relying on GID-0 to run Kibana, rather than UID-1000. +# OpenShift does this, for example. +# REF: https://docs.openshift.org/latest/creating_images/guidelines.html +RUN chmod -R g=u /usr/share/kibana + + +################################################################################ +# Build stage 1 (the actual Kibana image): +# +# Copy kibana from stage 0 +# Add entrypoint +################################################################################ +FROM ubuntu:20.04 +EXPOSE 5601 + +# RUN mkdir /usr/share/fonts/local +# RUN curl --retry 8 -S -L -o /usr/share/fonts/local/NotoSansCJK-Regular.ttc https://github.com/googlefonts/noto-cjk/raw/NotoSansV2.001/NotoSansCJK-Regular.ttc +# RUN echo "5dcd1c336cc9344cb77c03a0cd8982ca8a7dc97d620fd6c9c434e02dcb1ceeb3 /usr/share/fonts/local/NotoSansCJK-Regular.ttc" | sha256sum -c - +# RUN fc-cache -v + +# Provide a non-root user to run the process. 
+RUN groupadd --gid 1000 kibana && \ + useradd --uid 1000 --gid 1000 -G 0 \ + --home-dir /usr/share/kibana --no-create-home \ + kibana + +ENV ELASTIC_CONTAINER true + +WORKDIR /usr/share/kibana +RUN ln -s /usr/share/kibana /opt/kibana + +# Bring in Kibana from the initial stage. +COPY --from=builder --chown=1000:0 /usr/share/kibana /usr/share/kibana +COPY --from=builder --chown=1000:0 /bin/tini /bin/tini + +ENV PATH=/usr/share/kibana/bin:$PATH + +# Set some Kibana configuration defaults. +COPY --chown=1000:0 config/kibana.yml /usr/share/kibana/config/kibana.yml + +# Add the launcher/wrapper script. It knows how to interpret environment +# variables and translate them to Kibana CLI options. +COPY bin/docker-entrypoint.sh /usr/local/bin/ + +# Ensure gid 0 write permissions for OpenShift. +RUN chmod g+ws /usr/share/kibana && \ + find /usr/share/kibana -gid 0 -and -not -perm /g+w -exec chmod g+w {} \; + +# Remove the suid bit everywhere to mitigate "Stack Clash" +RUN find / -xdev -perm -4000 -exec chmod u-s {} + + +LABEL org.label-schema.build-date="2023-04-27T11:07:16.365Z" \ + org.label-schema.license="Elastic License" \ + org.label-schema.name="Kibana" \ + org.label-schema.schema-version="1.0" \ + org.label-schema.url="https://www.elastic.co/products/kibana" \ + org.label-schema.usage="https://www.elastic.co/guide/en/kibana/reference/index.html" \ + org.label-schema.vcs-ref="40546954e91188153267c4bc92c65c93e45c71ea" \ + org.label-schema.vcs-url="https://github.com/elastic/kibana" \ + org.label-schema.vendor="Elastic" \ + org.label-schema.version="8.7.1" \ + org.opencontainers.image.created="2023-04-27T11:07:16.365Z" \ + org.opencontainers.image.documentation="https://www.elastic.co/guide/en/kibana/reference/index.html" \ + org.opencontainers.image.licenses="Elastic License" \ + org.opencontainers.image.revision="40546954e91188153267c4bc92c65c93e45c71ea" \ + org.opencontainers.image.source="https://github.com/elastic/kibana" \ + org.opencontainers.image.title="Kibana" \ + org.opencontainers.image.url="https://www.elastic.co/products/kibana" \ + org.opencontainers.image.vendor="Elastic" \ + org.opencontainers.image.version="8.7.1" + +ENTRYPOINT ["/bin/tini", "--"] + +CMD ["/usr/local/bin/docker-entrypoint.sh"] + +USER kibana + +################################################################################ +# End of multi-stage Dockerfile +################################################################################ \ No newline at end of file diff --git a/docker/kibana/bin/docker-entrypoint.sh b/docker/kibana/bin/docker-entrypoint.sh new file mode 100755 index 0000000..3c77da8 --- /dev/null +++ b/docker/kibana/bin/docker-entrypoint.sh @@ -0,0 +1,443 @@ +#!/bin/bash + +# Run Kibana, using environment variables to set longopts defining Kibana's +# configuration. +# +# eg. 
Setting the environment variable: +# +# ELASTICSEARCH_LOGQUERIES=true +# +# will cause Kibana to be invoked with: +# +# --elasticsearch.logQueries=true + +kibana_vars=( + apm_oss.apmAgentConfigurationIndex + apm_oss.errorIndices + apm_oss.indexPattern + apm_oss.metricsIndices + apm_oss.onboardingIndices + apm_oss.sourcemapIndices + apm_oss.spanIndices + apm_oss.transactionIndices + console.proxyConfig + console.proxyFilter + csp.strict + csp.warnLegacyBrowsers + csp.disableUnsafeEval + csp.script_src + csp.worker_src + csp.style_src + csp.connect_src + csp.default_src + csp.font_src + csp.frame_src + csp.img_src + csp.frame_ancestors + csp.report_uri + csp.report_to + data.autocomplete.valueSuggestions.terminateAfter + data.autocomplete.valueSuggestions.timeout + data.search.asyncSearch.waitForCompletion + data.search.asyncSearch.keepAlive + data.search.asyncSearch.batchedReduceSize + data.search.asyncSearch.pollInterval + data.search.sessions.defaultExpiration + data.search.sessions.enabled + data.search.sessions.maxUpdateRetries + data.search.sessions.notTouchedInProgressTimeout + data.search.sessions.notTouchedTimeout + data.search.sessions.pageSize + data.search.sessions.trackingInterval + unifiedSearch.autocomplete.valueSuggestions.terminateAfter + unifiedSearch.autocomplete.valueSuggestions.timeout + unifiedSearch.autocomplete.querySuggestions.enabled + unifiedSearch.autocomplete.valueSuggestions.enabled + unifiedSearch.autocomplete.valueSuggestions.tiers + elasticsearch.customHeaders + elasticsearch.hosts + elasticsearch.logQueries + elasticsearch.password + elasticsearch.pingTimeout + elasticsearch.requestHeadersWhitelist + elasticsearch.requestTimeout + elasticsearch.serviceAccountToken + elasticsearch.shardTimeout + elasticsearch.sniffInterval + elasticsearch.sniffOnConnectionFault + elasticsearch.sniffOnStart + elasticsearch.ssl.alwaysPresentCertificate + elasticsearch.ssl.certificate + elasticsearch.ssl.certificateAuthorities + elasticsearch.ssl.key + elasticsearch.ssl.keyPassphrase + elasticsearch.ssl.keystore.password + elasticsearch.ssl.keystore.path + elasticsearch.ssl.truststore.password + elasticsearch.ssl.truststore.path + elasticsearch.ssl.verificationMode + elasticsearch.username + enterpriseSearch.accessCheckTimeout + enterpriseSearch.accessCheckTimeoutWarning + enterpriseSearch.host + externalUrl.policy + i18n.locale + interactiveSetup.enabled + interactiveSetup.connectionCheck.interval + interpreter.enableInVisualize + kibana.autocompleteTerminateAfter + kibana.autocompleteTimeout + kibana.index + logging.appenders + logging.appenders.console + logging.appenders.file + logging.loggers + logging.loggers.appenders + logging.loggers.level + logging.loggers.name + logging.root + logging.root.appenders + logging.root.level + map.includeElasticMapsService + map.tilemap.options.attribution + map.tilemap.options.maxZoom + map.tilemap.options.minZoom + map.tilemap.options.subdomains + map.tilemap.url + migrations.batchSize + migrations.maxBatchSizeBytes + migrations.pollInterval + migrations.retryAttempts + migrations.scrollDuration + migrations.skip + monitoring.cluster_alerts.email_notifications.email_address + monitoring.kibana.collection.enabled + monitoring.kibana.collection.interval + monitoring.ui.container.elasticsearch.enabled + monitoring.ui.container.logstash.enabled + monitoring.ui.elasticsearch.hosts + monitoring.ui.elasticsearch.logFetchCount + monitoring.ui.elasticsearch.password + monitoring.ui.elasticsearch.pingTimeout + 
monitoring.ui.elasticsearch.ssl.certificateAuthorities + monitoring.ui.elasticsearch.ssl.verificationMode + monitoring.ui.elasticsearch.username + monitoring.ui.enabled + monitoring.ui.logs.index + monitoring.ui.max_bucket_size + monitoring.ui.min_interval_seconds + newsfeed.enabled + node.roles + ops.cGroupOverrides.cpuAcctPath + ops.cGroupOverrides.cpuPath + ops.interval + path.data + pid.file + regionmap + savedObjects.maxImportExportSize + savedObjects.maxImportPayloadBytes + security.showInsecureClusterWarning + server.basePath + server.compression.enabled + server.compression.referrerWhitelist + server.cors + server.cors.allowCredentials + server.cors.allowOrigin + server.cors.enabled + server.cors.origin + server.customResponseHeaders + server.defaultRoute + server.host + server.keepAliveTimeout + server.maxPayload + server.maxPayloadBytes + server.name + server.port + server.publicBaseUrl + server.requestId.allowFromAnyIp + server.requestId.ipAllowlist + server.rewriteBasePath + server.securityResponseHeaders.disableEmbedding + server.securityResponseHeaders.permissionsPolicy + server.securityResponseHeaders.referrerPolicy + server.securityResponseHeaders.strictTransportSecurity + server.securityResponseHeaders.xContentTypeOptions + server.securityResponseHeaders.crossOriginOpenerPolicy + server.shutdownTimeout + server.socketTimeout + server.ssl.cert + server.ssl.certificate + server.ssl.certificateAuthorities + server.ssl.cipherSuites + server.ssl.clientAuthentication + server.ssl.enabled + server.ssl.key + server.ssl.keyPassphrase + server.ssl.keystore.password + server.ssl.keystore.path + server.ssl.redirectHttpFromPort + server.ssl.supportedProtocols + server.ssl.truststore.password + server.ssl.truststore.path + server.uuid + server.xsrf.allowlist + server.xsrf.disableProtection + status.allowAnonymous + status.v6ApiFormat + telemetry.allowChangingOptInStatus + telemetry.enabled + telemetry.hidePrivacyStatement + telemetry.optIn + telemetry.sendUsageTo + telemetry.sendUsageFrom + tilemap.options.attribution + tilemap.options.maxZoom + tilemap.options.minZoom + tilemap.options.subdomains + tilemap.url + vega.enableExternalUrls + vis_type_vega.enableExternalUrls + xpack.actions.allowedHosts + xpack.actions.customHostSettings + xpack.actions.email.domain_allowlist + xpack.actions.enabledActionTypes + xpack.actions.maxResponseContentLength + xpack.actions.preconfigured + xpack.actions.preconfiguredAlertHistoryEsIndex + xpack.actions.proxyBypassHosts + xpack.actions.proxyHeaders + xpack.actions.proxyOnlyHosts + xpack.actions.proxyRejectUnauthorizedCertificates + xpack.actions.proxyUrl + xpack.actions.rejectUnauthorized + xpack.actions.responseTimeout + xpack.actions.ssl.proxyVerificationMode + xpack.actions.ssl.verificationMode + xpack.alerting.healthCheck.interval + xpack.alerting.invalidateApiKeysTask.interval + xpack.alerting.invalidateApiKeysTask.removalDelay + xpack.alerting.defaultRuleTaskTimeout + xpack.alerting.rules.run.timeout + xpack.alerting.rules.run.ruleTypeOverrides + xpack.alerting.cancelAlertsOnRuleTimeout + xpack.alerting.rules.minimumScheduleInterval.value + xpack.alerting.rules.minimumScheduleInterval.enforce + xpack.alerting.rules.run.actions.max + xpack.alerting.rules.run.alerts.max + xpack.alerting.rules.run.actions.connectorTypeOverrides + xpack.alerts.healthCheck.interval + xpack.alerts.invalidateApiKeysTask.interval + xpack.alerts.invalidateApiKeysTask.removalDelay + xpack.apm.indices.error + xpack.apm.indices.metric + xpack.apm.indices.onboarding + 
xpack.apm.indices.sourcemap + xpack.apm.indices.span + xpack.apm.indices.transaction + xpack.apm.maxServiceEnvironments + xpack.apm.searchAggregatedTransactions + xpack.apm.serviceMapEnabled + xpack.apm.serviceMapFingerprintBucketSize + xpack.apm.serviceMapFingerprintGlobalBucketSize + xpack.apm.ui.enabled + xpack.apm.ui.maxTraceItems + xpack.apm.ui.transactionGroupBucketSize + xpack.banners.backgroundColor + xpack.banners.disableSpaceBanners + xpack.banners.placement + xpack.banners.textColor + xpack.banners.textContent + xpack.code.disk.thresholdEnabled + xpack.code.disk.watermarkLow + xpack.code.indexRepoFrequencyMs + xpack.code.lsp.verbose + xpack.code.maxWorkspace + xpack.code.security.enableGitCertCheck + xpack.code.security.gitHostWhitelist + xpack.code.security.gitProtocolWhitelist + xpack.code.ui.enabled + xpack.code.updateRepoFrequencyMs + xpack.code.verbose + xpack.data_enhanced.search.sessions.defaultExpiration + xpack.data_enhanced.search.sessions.enabled + xpack.data_enhanced.search.sessions.maxUpdateRetries + xpack.data_enhanced.search.sessions.notTouchedInProgressTimeout + xpack.data_enhanced.search.sessions.notTouchedTimeout + xpack.data_enhanced.search.sessions.pageSize + xpack.data_enhanced.search.sessions.trackingInterval + xpack.discoverEnhanced.actions.exploreDataInChart.enabled + xpack.discoverEnhanced.actions.exploreDataInContextMenu.enabled + xpack.encryptedSavedObjects.encryptionKey + xpack.encryptedSavedObjects.keyRotation.decryptionOnlyKeys + xpack.event_log.indexEntries + xpack.event_log.logEntries + xpack.fleet.agentPolicies + xpack.fleet.agents.elasticsearch.host + xpack.fleet.agents.elasticsearch.hosts + xpack.fleet.agents.enabled + xpack.fleet.agents.fleet_server.hosts + xpack.fleet.agents.kibana.host + xpack.fleet.agents.tlsCheckDisabled + xpack.fleet.packages + xpack.fleet.registryProxyUrl + xpack.fleet.registryUrl + xpack.graph.canEditDrillDownUrls + xpack.graph.savePolicy + xpack.infra.query.partitionFactor + xpack.infra.query.partitionSize + xpack.infra.sources.default.fields.container + xpack.infra.sources.default.fields.host + xpack.infra.sources.default.fields.message + xpack.infra.sources.default.fields.pod + xpack.infra.sources.default.fields.tiebreaker + xpack.infra.sources.default.fields.timestamp + xpack.infra.sources.default.logAlias + xpack.infra.sources.default.metricAlias + xpack.ingestManager.fleet.tlsCheckDisabled + xpack.ingestManager.registryUrl + xpack.observability.annotations.index + xpack.observability.unsafe.slo.enabled + xpack.observability.unsafe.alertDetails.apm.enabled + xpack.observability.unsafe.alertDetails.metrics.enabled + xpack.observability.unsafe.alertDetails.logs.enabled + xpack.observability.unsafe.alertDetails.uptime.enabled + xpack.reporting.capture.browser.autoDownload + xpack.reporting.capture.browser.chromium.disableSandbox + xpack.reporting.capture.browser.chromium.inspect + xpack.reporting.capture.browser.chromium.maxScreenshotDimension + xpack.reporting.capture.browser.chromium.proxy.bypass + xpack.reporting.capture.browser.chromium.proxy.enabled + xpack.reporting.capture.browser.chromium.proxy.server + xpack.reporting.capture.browser.type + xpack.reporting.capture.concurrency + xpack.reporting.capture.loadDelay + xpack.reporting.capture.maxAttempts + xpack.reporting.capture.networkPolicy + xpack.reporting.capture.settleTime + xpack.reporting.capture.timeout + xpack.reporting.capture.timeouts.openUrl + xpack.reporting.capture.timeouts.openUrl + xpack.reporting.capture.timeouts.renderComplete + 
xpack.reporting.capture.timeouts.waitForElements + xpack.reporting.capture.viewport.height + xpack.reporting.capture.viewport.width + xpack.reporting.capture.zoom + xpack.reporting.csv.checkForFormulas + xpack.reporting.csv.enablePanelActionDownload + xpack.reporting.csv.escapeFormulaValues + xpack.reporting.csv.maxSizeBytes + xpack.reporting.csv.scroll.duration + xpack.reporting.csv.scroll.size + xpack.reporting.csv.useByteOrderMarkEncoding + xpack.reporting.enabled + xpack.reporting.encryptionKey + xpack.reporting.kibanaApp + xpack.reporting.kibanaServer.hostname + xpack.reporting.kibanaServer.port + xpack.reporting.kibanaServer.protocol + xpack.reporting.poll.jobCompletionNotifier.interval + xpack.reporting.poll.jobCompletionNotifier.intervalErrorMultiplier + xpack.reporting.poll.jobsRefresh.interval + xpack.reporting.poll.jobsRefresh.intervalErrorMultiplier + xpack.reporting.queue.indexInterval + xpack.reporting.queue.pollEnabled + xpack.reporting.queue.pollInterval + xpack.reporting.queue.pollIntervalErrorMultiplier + xpack.reporting.queue.timeout + xpack.reporting.roles.allow + xpack.reporting.roles.enabled + xpack.ruleRegistry.write.enabled + xpack.security.accessAgreement.message + xpack.security.audit.appender.fileName + xpack.security.audit.appender.layout.highlight + xpack.security.audit.appender.layout.pattern + xpack.security.audit.appender.layout.type + xpack.security.audit.appender.legacyLoggingConfig + xpack.security.audit.appender.policy.interval + xpack.security.audit.appender.policy.modulate + xpack.security.audit.appender.policy.size + xpack.security.audit.appender.policy.type + xpack.security.audit.appender.strategy.max + xpack.security.audit.appender.strategy.pattern + xpack.security.audit.appender.strategy.type + xpack.security.audit.appender.type + xpack.security.audit.enabled + xpack.security.audit.ignore_filters + xpack.security.authc.http.autoSchemesEnabled + xpack.security.authc.http.enabled + xpack.security.authc.http.schemes + xpack.security.authc.oidc.realm + xpack.security.authc.providers + xpack.security.authc.saml.maxRedirectURLSize + xpack.security.authc.saml.realm + xpack.security.authc.selector.enabled + xpack.security.cookieName + xpack.security.encryptionKey + xpack.security.loginAssistanceMessage + xpack.security.loginHelp + xpack.security.sameSiteCookies + xpack.security.secureCookies + xpack.security.session.cleanupInterval + xpack.security.session.concurrentSessions.maxSessions + xpack.security.session.idleTimeout + xpack.security.session.lifespan + xpack.security.sessionTimeout + xpack.security.showInsecureClusterWarning + xpack.securitySolution.alertMergeStrategy + xpack.securitySolution.alertIgnoreFields + xpack.securitySolution.maxExceptionsImportSize + xpack.securitySolution.maxRuleImportExportSize + xpack.securitySolution.maxRuleImportPayloadBytes + xpack.securitySolution.maxTimelineImportExportSize + xpack.securitySolution.maxTimelineImportPayloadBytes + xpack.securitySolution.packagerTaskInterval + xpack.securitySolution.prebuiltRulesPackageVersion + xpack.spaces.maxSpaces + xpack.task_manager.max_attempts + xpack.task_manager.max_poll_inactivity_cycles + xpack.task_manager.max_workers + xpack.task_manager.monitored_aggregated_stats_refresh_rate + xpack.task_manager.monitored_stats_required_freshness + xpack.task_manager.monitored_stats_running_average_window + xpack.task_manager.monitored_stats_health_verbose_log.enabled + xpack.task_manager.monitored_stats_health_verbose_log.warn_delayed_task_start_in_seconds + 
xpack.task_manager.monitored_task_execution_thresholds + xpack.task_manager.poll_interval + xpack.task_manager.request_capacity + xpack.task_manager.version_conflict_threshold + xpack.task_manager.event_loop_delay.monitor + xpack.task_manager.event_loop_delay.warn_threshold + xpack.uptime.index +) + +longopts='' +for kibana_var in ${kibana_vars[*]}; do + # 'elasticsearch.hosts' -> 'ELASTICSEARCH_HOSTS' + env_var=$(echo ${kibana_var^^} | tr . _) + + # Indirectly lookup env var values via the name of the var. + # REF: http://tldp.org/LDP/abs/html/bashver2.html#EX78 + value=${!env_var} + if [[ -n $value ]]; then + longopt="--${kibana_var}=${value}" + longopts+=" ${longopt}" + fi +done + +# Files created at run-time should be group-writable, for Openshift's sake. +umask 0002 + +# The virtual file /proc/self/cgroup should list the current cgroup +# membership. For each hierarchy, you can follow the cgroup path from +# this file to the cgroup filesystem (usually /sys/fs/cgroup/) and +# introspect the statistics for the cgroup for the given +# hierarchy. Alas, Docker breaks this by mounting the container +# statistics at the root while leaving the cgroup paths as the actual +# paths. Therefore, Kibana provides a mechanism to override +# reading the cgroup path from /proc/self/cgroup and instead uses the +# cgroup path defined in the configuration properties +# ops.cGroupOverrides.cpuPath and ops.cGroupOverrides.cpuAcctPath. +# Therefore, we set this value here so that cgroup statistics are +# available for the container this process will run in. + +exec /usr/share/kibana/bin/kibana --ops.cGroupOverrides.cpuPath=/ --ops.cGroupOverrides.cpuAcctPath=/ ${longopts} "$@" \ No newline at end of file diff --git a/docker/kibana/config/kibana.yml b/docker/kibana/config/kibana.yml new file mode 100644 index 0000000..266087a --- /dev/null +++ b/docker/kibana/config/kibana.yml @@ -0,0 +1,11 @@ +# Default Kibana configuration for docker target +server.name: "kibana" +server.host: "0.0.0.0" +server.shutdownTimeout: "5s" + +elasticsearch.hosts: [ "https://elasticsearch:9200" ] +elasticsearch.ssl.certificateAuthorities: [ "config/certs/ca/ca.crt" ] +elasticsearch.username: "kibana_system" +elasticsearch.password: "bootstrap.password" # environment variable set in docker-compose.yml + +monitoring.ui.container.elasticsearch.enabled: true \ No newline at end of file diff --git a/webapp-next/.dockerignore b/webapp-next/.dockerignore new file mode 100644 index 0000000..dd02b1b --- /dev/null +++ b/webapp-next/.dockerignore @@ -0,0 +1,8 @@ +.env +Dockerfile +.dockerignore +node_modules +npm-debug.log +README.md +.next +.git \ No newline at end of file diff --git a/webapp-next/Dockerfile b/webapp-next/Dockerfile new file mode 100644 index 0000000..b76e926 --- /dev/null +++ b/webapp-next/Dockerfile @@ -0,0 +1,60 @@ +##### DEPENDENCIES + +FROM node:16-alpine3.17 AS deps +RUN apk add --no-cache libc6-compat openssl1.1-compat +WORKDIR /app + +# Install dependencies based on the preferred package manager + +COPY package.json yarn.lock* package-lock.json* pnpm-lock.yaml* ./ + +RUN \ + if [ -f yarn.lock ]; then yarn --frozen-lockfile; \ + elif [ -f package-lock.json ]; then npm install; \ + else echo "Lockfile not found." && exit 1; \ + fi + +##### BUILDER + +FROM node:16-alpine3.17 AS builder + +ARG ELASTIC_HOST +ARG ELASTIC_PASSWORD +ARG NEXT_PUBLIC_ELASTIC_API_KEY_NAME +ARG AWS_ACCESS_KEY_ID +ARG AWS_SECRET_ACCESS_KEY +ARG AWS_REGION +ARG EMAIL_SOURCE + +WORKDIR /app +COPY --from=deps /app/node_modules ./node_modules +COPY . 
. + +RUN \ + if [ -f yarn.lock ]; then yarn build; \ + elif [ -f package-lock.json ]; then npm run build; \ + else echo "Lockfile not found." && exit 1; \ + fi + +##### RUNNER + +FROM node:16-alpine3.17 AS runner +WORKDIR /app + +ENV NODE_ENV production + +RUN addgroup --system --gid 1001 nodejs +RUN adduser --system --uid 1001 nextjs + +COPY --from=builder /app/next.config.js ./ +COPY --from=builder /app/public ./public +COPY --from=builder /app/package.json ./package.json + +COPY --from=builder --chown=nextjs:nodejs /app/.next/standalone ./ +COPY --from=builder --chown=nextjs:nodejs /app/.next/static ./.next/static + +USER nextjs +EXPOSE 3000 +ENV PORT 3000 + +CMD ["node", "server.js"] diff --git a/webapp-next/next.config.js b/webapp-next/next.config.js index a843cbe..c27de92 100644 --- a/webapp-next/next.config.js +++ b/webapp-next/next.config.js @@ -1,6 +1,7 @@ /** @type {import('next').NextConfig} */ const nextConfig = { reactStrictMode: true, + output: "standalone" } module.exports = nextConfig diff --git a/webapp-next/pages/api/auth/create-user.ts b/webapp-next/pages/api/auth/create-user.ts index 89bf27e..29ad675 100644 --- a/webapp-next/pages/api/auth/create-user.ts +++ b/webapp-next/pages/api/auth/create-user.ts @@ -17,7 +17,7 @@ export default async function handler( }, tls: { ca: fs.readFileSync( - path.resolve(process.cwd(), './../certificates/ca.crt') + path.resolve(process.cwd(), './certs/ca/ca.crt') ), rejectUnauthorized: false } diff --git a/webapp-next/pages/api/auth/index.ts b/webapp-next/pages/api/auth/index.ts index f473be8..1905fef 100644 --- a/webapp-next/pages/api/auth/index.ts +++ b/webapp-next/pages/api/auth/index.ts @@ -29,7 +29,7 @@ export default async function handler( }, tls: { ca: fs.readFileSync( - path.resolve(process.cwd(), './../certificates/ca.crt') + path.resolve(process.cwd(), './certs/ca/ca.crt') ), rejectUnauthorized: false } diff --git a/webapp-next/pages/api/auth/user.ts b/webapp-next/pages/api/auth/user.ts index d5a2ef0..7889e23 100644 --- a/webapp-next/pages/api/auth/user.ts +++ b/webapp-next/pages/api/auth/user.ts @@ -20,7 +20,7 @@ export default async function handler( }, tls: { ca: fs.readFileSync( - path.resolve(process.cwd(), './../certificates/ca.crt') + path.resolve(process.cwd(), './certs/ca/ca.crt') ), rejectUnauthorized: false } diff --git a/webapp-next/pages/api/auth/verify-code.ts b/webapp-next/pages/api/auth/verify-code.ts index a385647..42a64d7 100644 --- a/webapp-next/pages/api/auth/verify-code.ts +++ b/webapp-next/pages/api/auth/verify-code.ts @@ -21,7 +21,7 @@ export default async function handler( }, tls: { ca: fs.readFileSync( - path.resolve(process.cwd(), './../certificates/ca.crt') + path.resolve(process.cwd(), './certs/ca/ca.crt') ), rejectUnauthorized: false } diff --git a/webapp-next/pages/api/elk/data.ts b/webapp-next/pages/api/elk/data.ts index 51baa3b..1e44008 100644 --- a/webapp-next/pages/api/elk/data.ts +++ b/webapp-next/pages/api/elk/data.ts @@ -23,7 +23,7 @@ export default async function handler( }, tls: { ca: fs.readFileSync( - path.resolve(process.cwd(), './../certificates/ca.crt') + path.resolve(process.cwd(), './certs/ca/ca.crt') ), rejectUnauthorized: false } diff --git a/webapp-next/pages/api/elk/first.ts b/webapp-next/pages/api/elk/first.ts index b9bf587..78b3d68 100644 --- a/webapp-next/pages/api/elk/first.ts +++ b/webapp-next/pages/api/elk/first.ts @@ -23,7 +23,7 @@ export default async function handler( apiKey: req.cookies[ELASTIC_API_KEY_NAME] as string }, tls: { - ca: 
fs.readFileSync(path.resolve(process.cwd(), './../certificates/ca.crt')), + ca: fs.readFileSync(path.resolve(process.cwd(), './certs/ca/ca.crt')), rejectUnauthorized: false } });
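
Note on the recurring change above: each of the API routes touched by this diff now resolves the Elasticsearch CA from ./certs/ca/ca.crt relative to process.cwd(). In the runner stage of webapp-next/Dockerfile the working directory is /app, so the bundle is expected at /app/certs/ca/ca.crt at runtime; the Dockerfile does not copy it, so it is presumably mounted or injected by docker-compose. As a minimal sketch only, not part of this diff, the repeated client setup could be consolidated into a hypothetical helper; ELASTIC_HOST is assumed to hold the cluster URL (it is already a build arg above), and the apiKey argument stands for the cookie value the elk routes read via ELASTIC_API_KEY_NAME:

import fs from 'fs';
import path from 'path';
import { Client } from '@elastic/elasticsearch';

// Hypothetical helper (not in this diff): builds a client with the TLS options
// the API routes share after the certificate path change.
export function getElasticClient(apiKey: string): Client {
  return new Client({
    // Assumption: ELASTIC_HOST carries the cluster URL, e.g. https://elasticsearch:9200
    node: process.env.ELASTIC_HOST,
    auth: { apiKey },
    tls: {
      // CA is now expected under ./certs/ca/ca.crt relative to the app working directory
      ca: fs.readFileSync(path.resolve(process.cwd(), './certs/ca/ca.crt')),
      // Kept as in the existing routes: certificate verification stays disabled
      rejectUnauthorized: false
    }
  });
}

If the CA should instead be baked into the image, adding a COPY of the certs directory to the runner stage of webapp-next/Dockerfile would produce the same /app/certs/ca/ca.crt layout.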