diff --git a/.env b/.env
new file mode 100644
index 00000000..12a9ad9b
--- /dev/null
+++ b/.env
@@ -0,0 +1,52 @@
+ELASTIC_VERSION=8.6.2
+
+## Passwords for stack users
+#
+
+# User 'elastic' (built-in)
+#
+# Superuser role, full access to cluster management and data indices.
+# https://www.elastic.co/guide/en/elasticsearch/reference/current/built-in-users.html
+ELASTIC_PASSWORD='admin123@'
+
+# User 'logstash_internal' (custom)
+#
+# The user Logstash uses to connect and send data to Elasticsearch.
+# https://www.elastic.co/guide/en/logstash/current/ls-security.html
+LOGSTASH_INTERNAL_PASSWORD='admin123@'
+
+# User 'kibana_system' (built-in)
+#
+# The user Kibana uses to connect and communicate with Elasticsearch.
+# https://www.elastic.co/guide/en/elasticsearch/reference/current/built-in-users.html
+KIBANA_SYSTEM_PASSWORD='admin123@'
+
+# Users 'metricbeat_internal', 'filebeat_internal' and 'heartbeat_internal' (custom)
+#
+# The users Beats use to connect and send data to Elasticsearch.
+# https://www.elastic.co/guide/en/beats/metricbeat/current/feature-roles.html
+METRICBEAT_INTERNAL_PASSWORD='admin123@'
+FILEBEAT_INTERNAL_PASSWORD='admin123@'
+HEARTBEAT_INTERNAL_PASSWORD='admin123@'
+
+# User 'monitoring_internal' (custom)
+#
+# The user Metricbeat uses to collect monitoring data from stack components.
+# https://www.elastic.co/guide/en/elasticsearch/reference/current/how-monitoring-works.html
+MONITORING_INTERNAL_PASSWORD='admin123@'
+
+# User 'beats_system' (built-in)
+#
+# The user the Beats use when storing monitoring information in Elasticsearch.
+# https://www.elastic.co/guide/en/elasticsearch/reference/current/built-in-users.html
+BEATS_SYSTEM_PASSWORD='admin123@'
+
+# RabbitMQ
+#
+RABBITMQ_DEFAULT_USER='rabbit'
+RABBITMQ_DEFAULT_PASS='admin123@'
+
+# Kafka
+#
+KAFKA_CLIENT_USERS='kafka'
+KAFKA_CLIENT_PASSWORDS='admin123@'
\ No newline at end of file
diff --git a/docker-compose.dcproj b/docker-compose.dcproj
index ca062f25..9e42c14b 100644
--- a/docker-compose.dcproj
+++ b/docker-compose.dcproj
@@ -9,10 +9,68 @@
yanlib.httpapi.host
+
docker-compose.yml
\ No newline at end of file
diff --git a/docker-compose.yml b/docker-compose.yml
index ab667f17..e42934c4 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -6,3 +6,173 @@ services:
build:
context: .
dockerfile: host/YANLib.HttpApi.Host/Dockerfile
+
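+ # One-shot container that initializes the Elasticsearch users and roles
+ # defined below (see setup/entrypoint.sh); its completion state is kept
+ # in the 'setup' volume.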
+ setup:
+ build:
+ context: setup/
+ args:
+ ELASTIC_VERSION: ${ELASTIC_VERSION}
+ init: true
+ container_name: setup
+ volumes:
+ - ./setup/entrypoint.sh:/entrypoint.sh:ro,Z
+ - ./setup/lib.sh:/lib.sh:ro,Z
+ - ./setup/roles:/roles:ro,Z
+ - setup:/state:Z
+ environment:
+ ELASTIC_PASSWORD: ${ELASTIC_PASSWORD:-}
+ LOGSTASH_INTERNAL_PASSWORD: ${LOGSTASH_INTERNAL_PASSWORD:-}
+ KIBANA_SYSTEM_PASSWORD: ${KIBANA_SYSTEM_PASSWORD:-}
+ METRICBEAT_INTERNAL_PASSWORD: ${METRICBEAT_INTERNAL_PASSWORD:-}
+ FILEBEAT_INTERNAL_PASSWORD: ${FILEBEAT_INTERNAL_PASSWORD:-}
+ HEARTBEAT_INTERNAL_PASSWORD: ${HEARTBEAT_INTERNAL_PASSWORD:-}
+ MONITORING_INTERNAL_PASSWORD: ${MONITORING_INTERNAL_PASSWORD:-}
+ BEATS_SYSTEM_PASSWORD: ${BEATS_SYSTEM_PASSWORD:-}
+ RABBITMQ_DEFAULT_USER: ${RABBITMQ_DEFAULT_USER:-}
+ RABBITMQ_DEFAULT_PASS: ${RABBITMQ_DEFAULT_PASS:-}
+ KAFKA_CLIENT_USERS: ${KAFKA_CLIENT_USERS:-}
+ KAFKA_CLIENT_PASSWORDS: ${KAFKA_CLIENT_PASSWORDS:-}
+ networks:
+ - demo
+ depends_on:
+ - elasticsearch
+
+ elasticsearch:
+ build:
+ context: elasticsearch/
+ args:
+ ELASTIC_VERSION: ${ELASTIC_VERSION}
+ container_name: elasticsearch
+ volumes:
+ - ./elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro,Z
+ - elasticsearch:/usr/share/elasticsearch/data:Z
+ ports:
+ - 9200:9200
+ - 9300:9300
+ environment:
+ ES_JAVA_OPTS: -Xms512m -Xmx512m
+ # Bootstrap password.
+ # Used to initialize the keystore during the initial startup of
+ # Elasticsearch. Ignored on subsequent runs.
+ ELASTIC_PASSWORD: ${ELASTIC_PASSWORD:-}
+ # Single-node discovery (discovery.type, set in config/elasticsearch.yml)
+ # disables production mode and skips the bootstrap checks.
+ # see: https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks.html
+ networks:
+ - demo
+ restart: unless-stopped
+
+ logstash:
+ build:
+ context: logstash/
+ args:
+ ELASTIC_VERSION: ${ELASTIC_VERSION}
+ container_name: logstash
+ volumes:
+ - ./logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml:ro,Z
+ - ./logstash/pipeline:/usr/share/logstash/pipeline:ro,Z
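+ # Daily log files written by the application's Serilog file sink
+ # (see host/YANLib.HttpApi.Host/Program.cs), mounted read-only for Logstash.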
+ - ./host/YANLib.HttpApi.Host/Logs:/usr/share/logstash/logs:ro,Z
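+ # 9600 is the Logstash monitoring API (used by the Metricbeat extension);
+ # 5044 is conventionally the Beats input; 50000 is the plain TCP/UDP input
+ # (used by the Logspout extension).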
+ ports:
+ - 9600:9600
+ - 5044:5044
+ - 50000:50000/tcp
+ - 50000:50000/udp
+ environment:
+ LS_JAVA_OPTS: -Xms256m -Xmx256m
+ LOGSTASH_INTERNAL_PASSWORD: ${LOGSTASH_INTERNAL_PASSWORD:-}
+ networks:
+ - demo
+ depends_on:
+ - elasticsearch
+ restart: unless-stopped
+
+ kibana:
+ build:
+ context: kibana/
+ args:
+ ELASTIC_VERSION: ${ELASTIC_VERSION}
+ container_name: kibana
+ volumes:
+ - ./kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml:ro,Z
+ ports:
+ - 5601:5601
+ environment:
+ KIBANA_SYSTEM_PASSWORD: ${KIBANA_SYSTEM_PASSWORD:-}
+ networks:
+ - demo
+ depends_on:
+ - elasticsearch
+ restart: unless-stopped
+
+ rabbitmq:
+ image: rabbitmq:3-management
+ container_name: rabbitmq
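+ # 5672 is the AMQP listener; 15672 serves the management UI bundled with
+ # the 'rabbitmq:3-management' image.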
+ ports:
+ - 5672:5672
+ - 15672:15672
+ environment:
+ RABBITMQ_DEFAULT_USER: ${RABBITMQ_DEFAULT_USER:-}
+ RABBITMQ_DEFAULT_PASS: ${RABBITMQ_DEFAULT_PASS:-}
+ networks:
+ - demo
+ depends_on:
+ - logstash
+ restart: unless-stopped
+
+ zookeeper:
+ image: bitnami/zookeeper:latest
+ container_name: zookeeper
+ ports:
+ - 2181:2181
+ environment:
+ - ALLOW_ANONYMOUS_LOGIN=yes
+ networks:
+ - demo
+ depends_on:
+ - logstash
+ restart: unless-stopped
+
+ kafka:
+ image: bitnami/kafka:latest
+ container_name: kafka
+ ports:
+ - 9092:9092
+ - 9093:9093
+ environment:
+ KAFKA_BROKER_ID: 1
+ KAFKA_CFG_ZOOKEEPER_CONNECT: zookeeper:2181
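+ # Two listeners: CLIENT (9093) for containers on the 'demo' network
+ # (e.g. kafka-ui), EXTERNAL (9092) advertised as 'localhost' for clients
+ # running on the Docker host.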
+ KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: CLIENT:PLAINTEXT,EXTERNAL:PLAINTEXT
+ KAFKA_CFG_LISTENERS: CLIENT://:9093,EXTERNAL://:9092
+ KAFKA_CFG_ADVERTISED_LISTENERS: CLIENT://kafka:9093,EXTERNAL://localhost:9092
+ KAFKA_CFG_INTER_BROKER_LISTENER_NAME: CLIENT
+ ALLOW_PLAINTEXT_LISTENER: 'yes' # quoted so YAML parses it as a string, not a boolean
+ KAFKA_CLIENT_USERS: ${KAFKA_CLIENT_USERS:-}
+ KAFKA_CLIENT_PASSWORDS: ${KAFKA_CLIENT_PASSWORDS:-}
+ networks:
+ - demo
+ depends_on:
+ - zookeeper
+ restart: unless-stopped
+
+ kafka-ui:
+ image: provectuslabs/kafka-ui:latest
+ container_name: kafka-ui
+ ports:
+ - 8080:8080
+ environment:
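+ # The UI connects through the broker's internal CLIENT listener (kafka:9093).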
+ - KAFKA_CLUSTERS_0_NAME=local
+ - KAFKA_CLUSTERS_0_ZOOKEEPER=zookeeper:2181
+ - KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=kafka:9093
+ networks:
+ - demo
+ depends_on:
+ - zookeeper
+ - kafka
+ restart: unless-stopped
+
+networks:
+ demo:
+ driver: bridge
+
+volumes:
+ setup:
+ elasticsearch:
diff --git a/elasticsearch/.dockerignore b/elasticsearch/.dockerignore
new file mode 100644
index 00000000..37eef9d5
--- /dev/null
+++ b/elasticsearch/.dockerignore
@@ -0,0 +1,6 @@
+# Ignore Docker build files
+Dockerfile
+.dockerignore
+
+# Ignore OS artifacts
+**/.DS_Store
diff --git a/elasticsearch/Dockerfile b/elasticsearch/Dockerfile
new file mode 100644
index 00000000..22528c6d
--- /dev/null
+++ b/elasticsearch/Dockerfile
@@ -0,0 +1,7 @@
+ARG ELASTIC_VERSION
+
+# https://www.docker.elastic.co/
+FROM docker.elastic.co/elasticsearch/elasticsearch:${ELASTIC_VERSION}
+
+# Add your elasticsearch plugins setup here
+# Example: RUN elasticsearch-plugin install analysis-icu
diff --git a/elasticsearch/config/elasticsearch.yml b/elasticsearch/config/elasticsearch.yml
new file mode 100644
index 00000000..427f4b4b
--- /dev/null
+++ b/elasticsearch/config/elasticsearch.yml
@@ -0,0 +1,20 @@
+---
+## Default Elasticsearch configuration from Elasticsearch base image.
+## https://github.com/elastic/elasticsearch/blob/main/distribution/docker/src/docker/config/elasticsearch.yml
+#
+cluster.name: docker-cluster
+network.host: 0.0.0.0
+
+node.name: elasticsearch
+
+discovery.type: single-node
+
+## X-Pack settings
+## see https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html
+#
+xpack.license.self_generated.type: trial
+xpack.security.enabled: true
+
+## In this stack, the built-in and custom users' passwords are initialized
+## by the 'setup' service from the values defined in the .env file.
diff --git a/extensions/README.md b/extensions/README.md
new file mode 100644
index 00000000..50016fb6
--- /dev/null
+++ b/extensions/README.md
@@ -0,0 +1,3 @@
+# Extensions
+
+Third-party extensions that enable extra integrations with the Elastic stack.
diff --git a/extensions/curator/.dockerignore b/extensions/curator/.dockerignore
new file mode 100644
index 00000000..37eef9d5
--- /dev/null
+++ b/extensions/curator/.dockerignore
@@ -0,0 +1,6 @@
+# Ignore Docker build files
+Dockerfile
+.dockerignore
+
+# Ignore OS artifacts
+**/.DS_Store
diff --git a/extensions/curator/Dockerfile b/extensions/curator/Dockerfile
new file mode 100644
index 00000000..6cb8cdc6
--- /dev/null
+++ b/extensions/curator/Dockerfile
@@ -0,0 +1,9 @@
+FROM untergeek/curator:8.0.2
+
+USER root
+
+RUN >>/var/spool/cron/crontabs/nobody \
+ echo '* * * * * /curator/curator /.curator/delete_log_files_curator.yml'
+
+ENTRYPOINT ["crond"]
+CMD ["-f", "-d8"]
diff --git a/extensions/curator/README.md b/extensions/curator/README.md
new file mode 100644
index 00000000..5c38786a
--- /dev/null
+++ b/extensions/curator/README.md
@@ -0,0 +1,20 @@
+# Curator
+
+Elasticsearch Curator helps you curate or manage your indices.
+
+## Usage
+
+If you want to include the Curator extension, run Docker Compose from the root of the repository with an additional
+command line argument referencing the `curator-compose.yml` file:
+
+```bash
+$ docker-compose -f docker-compose.yml -f extensions/curator/curator-compose.yml up
+```
+
+This sample setup demonstrates how to run `curator` every minute using `cron`.
+
+All configuration files are available in the `config/` directory.
+
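+If the stack is already running, you can also trigger the cleanup action once, without waiting for `cron`, by
+running the same command the crontab uses inside the container (a sketch, reusing the paths from the Dockerfile):
+
+```console
+$ docker-compose -f docker-compose.yml -f extensions/curator/curator-compose.yml exec curator /curator/curator --config /.curator/curator.yml /.curator/delete_log_files_curator.yml
+```
+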
+## Documentation
+
+[Curator Reference](https://www.elastic.co/guide/en/elasticsearch/client/curator/current/index.html)
diff --git a/extensions/curator/config/curator.yml b/extensions/curator/config/curator.yml
new file mode 100644
index 00000000..6777edc9
--- /dev/null
+++ b/extensions/curator/config/curator.yml
@@ -0,0 +1,13 @@
+# Curator configuration
+# https://www.elastic.co/guide/en/elasticsearch/client/curator/current/configfile.html
+
+elasticsearch:
+ client:
+ hosts: [ http://elasticsearch:9200 ]
+ other_settings:
+ username: elastic
+ password: ${ELASTIC_PASSWORD}
+
+logging:
+ loglevel: INFO
+ logformat: default
diff --git a/extensions/curator/config/delete_log_files_curator.yml b/extensions/curator/config/delete_log_files_curator.yml
new file mode 100644
index 00000000..779c67ac
--- /dev/null
+++ b/extensions/curator/config/delete_log_files_curator.yml
@@ -0,0 +1,21 @@
+actions:
+ 1:
+ action: delete_indices
+ description: >-
+ Delete indices. Find which to delete by first limiting the list to
+ logstash- prefixed indices. Then further filter those to prevent deletion
+ of anything less than the number of days specified by unit_count.
+ Ignore the error if the filter does not result in an actionable list of
+ indices (ignore_empty_list) and exit cleanly.
+ options:
+ ignore_empty_list: True
+ disable_action: False
+ filters:
+ - filtertype: pattern
+ kind: prefix
+ value: logstash-
+ - filtertype: age
+ source: creation_date
+ direction: older
+ unit: days
+ unit_count: 2
diff --git a/extensions/curator/curator-compose.yml b/extensions/curator/curator-compose.yml
new file mode 100644
index 00000000..1a4bb17e
--- /dev/null
+++ b/extensions/curator/curator-compose.yml
@@ -0,0 +1,16 @@
+version: '3.7'
+
+services:
+ curator:
+ build:
+ context: extensions/curator/
+ init: true
+ volumes:
+ - ./extensions/curator/config/curator.yml:/.curator/curator.yml:ro,Z
+ - ./extensions/curator/config/delete_log_files_curator.yml:/.curator/delete_log_files_curator.yml:ro,Z
+ environment:
+ ELASTIC_PASSWORD: ${ELASTIC_PASSWORD:-}
+ networks:
+    - demo
+ depends_on:
+ - elasticsearch
diff --git a/extensions/enterprise-search/.dockerignore b/extensions/enterprise-search/.dockerignore
new file mode 100644
index 00000000..37eef9d5
--- /dev/null
+++ b/extensions/enterprise-search/.dockerignore
@@ -0,0 +1,6 @@
+# Ignore Docker build files
+Dockerfile
+.dockerignore
+
+# Ignore OS artifacts
+**/.DS_Store
diff --git a/extensions/enterprise-search/Dockerfile b/extensions/enterprise-search/Dockerfile
new file mode 100644
index 00000000..4f0752e5
--- /dev/null
+++ b/extensions/enterprise-search/Dockerfile
@@ -0,0 +1,4 @@
+ARG ELASTIC_VERSION
+
+# https://www.docker.elastic.co/
+FROM docker.elastic.co/enterprise-search/enterprise-search:${ELASTIC_VERSION}
diff --git a/extensions/enterprise-search/README.md b/extensions/enterprise-search/README.md
new file mode 100644
index 00000000..d6391dba
--- /dev/null
+++ b/extensions/enterprise-search/README.md
@@ -0,0 +1,144 @@
+# Enterprise Search extension
+
+Elastic Enterprise Search is a suite of products for search applications backed by the Elastic Stack.
+
+## Requirements
+
+* 2 GB of free RAM, on top of the resources required by the other stack components and extensions.
+
+The Enterprise Search web application is served on the TCP port `3002`.
+
+## Usage
+
+### Generate an encryption key
+
+Enterprise Search requires one or more [encryption keys][enterprisesearch-encryption] to be configured before the
+initial startup. Failing to do so prevents the server from starting.
+
+Encryption keys can contain any series of characters. Elastic recommends using 256-bit keys for optimal security.
+
+Those encryption keys must be added manually to the [`config/enterprise-search.yml`][config-enterprisesearch] file. By
+default, the list of encryption keys is empty and must be populated using one of the following formats:
+
+```yaml
+secret_management.encryption_keys:
+ - my_first_encryption_key
+ - my_second_encryption_key
+ - ...
+```
+
+```yaml
+secret_management.encryption_keys: [my_first_encryption_key, my_second_encryption_key, ...]
+```
+
+> **Note**
+> To generate a strong random encryption key, you can use the OpenSSL utility or any other online/offline tool of your
+> choice:
+>
+> ```console
+> $ openssl rand -hex 32
+> 680f94e568c90364bedf927b2f0f49609702d3eab9098688585a375b14274546
+> ```
+
+### Enable Elasticsearch's API key service
+
+Enterprise Search requires Elasticsearch's built-in [API key service][es-security] to be enabled in order to start.
+This service is disabled by default unless TLS is enabled on the HTTP interface, which is not the case in this stack.
+
+To enable it, modify the Elasticsearch configuration file in [`elasticsearch/config/elasticsearch.yml`][config-es] and
+add the following setting:
+
+```yaml
+xpack.security.authc.api_key.enabled: true
+```
+
+### Configure the Enterprise Search host in Kibana
+
+Kibana acts as the [management interface][enterprisesearch-kb] to Enterprise Search.
+
+To enable the management experience for Enterprise Search, modify the Kibana configuration file in
+[`kibana/config/kibana.yml`][config-kbn] and add the following setting:
+
+```yaml
+enterpriseSearch.host: http://enterprise-search:3002
+```
+
+### Start the server
+
+To include Enterprise Search in the stack, run Docker Compose from the root of the repository with an additional command
+line argument referencing the `enterprise-search-compose.yml` file:
+
+```console
+$ docker-compose -f docker-compose.yml -f extensions/enterprise-search/enterprise-search-compose.yml up
+```
+
+Allow a few minutes for the stack to start, then point your web browser at <http://localhost:3002> to see the
+Enterprise Search home page.
+
+Enterprise Search is configured on first boot with the following default credentials:
+
+* user: *enterprise_search*
+* password: *changeme*
+
+## Security
+
+The Enterprise Search password is defined inside the Compose file via the `ENT_SEARCH_DEFAULT_PASSWORD` environment
+variable. We highly recommend replacing the default value with a more secure password.
+
+To do so, change the value of the `ENT_SEARCH_DEFAULT_PASSWORD` environment variable inside the Compose file **before
+the first boot**:
+
+```yaml
+enterprise-search:
+
+ environment:
+ ENT_SEARCH_DEFAULT_PASSWORD: {{some strong password}}
+```
+
+> **Warning**
+> The default Enterprise Search password can only be set during the initial boot. Once the password is persisted in
+> Elasticsearch, it can only be changed via the Elasticsearch API.
+
+For more information, please refer to [User Management and Security][enterprisesearch-security].
+
+## Configuring Enterprise Search
+
+The Enterprise Search configuration is stored in [`config/enterprise-search.yml`][config-enterprisesearch]. You can
+modify this file using the [Default Enterprise Search configuration][enterprisesearch-config] as a reference.
+
+You can also specify the options you want to override by setting environment variables inside the Compose file:
+
+```yaml
+enterprise-search:
+
+ environment:
+ ent_search.auth.source: standard
+ worker.threads: '6'
+```
+
+Any change to the Enterprise Search configuration requires a restart of the Enterprise Search container:
+
+```console
+$ docker-compose -f docker-compose.yml -f extensions/enterprise-search/enterprise-search-compose.yml restart enterprise-search
+```
+
+Please refer to the following documentation page for more details about how to configure Enterprise Search inside a
+Docker container: [Running Enterprise Search Using Docker][enterprisesearch-docker].
+
+## See also
+
+[Enterprise Search documentation][enterprisesearch-docs]
+
+[config-enterprisesearch]: ./config/enterprise-search.yml
+
+[enterprisesearch-encryption]: https://www.elastic.co/guide/en/enterprise-search/current/encryption-keys.html
+[enterprisesearch-security]: https://www.elastic.co/guide/en/workplace-search/current/workplace-search-security.html
+[enterprisesearch-config]: https://www.elastic.co/guide/en/enterprise-search/current/configuration.html
+[enterprisesearch-docker]: https://www.elastic.co/guide/en/enterprise-search/current/docker.html
+[enterprisesearch-docs]: https://www.elastic.co/guide/en/enterprise-search/current/index.html
+[enterprisesearch-kb]: https://www.elastic.co/guide/en/kibana/current/enterprise-search-settings-kb.html
+
+[es-security]: https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html#api-key-service-settings
+[config-es]: ../../elasticsearch/config/elasticsearch.yml
+[config-kbn]: ../../kibana/config/kibana.yml
diff --git a/extensions/enterprise-search/config/enterprise-search.yml b/extensions/enterprise-search/config/enterprise-search.yml
new file mode 100644
index 00000000..a1f098dd
--- /dev/null
+++ b/extensions/enterprise-search/config/enterprise-search.yml
@@ -0,0 +1,28 @@
+---
+## Enterprise Search core configuration
+## https://www.elastic.co/guide/en/enterprise-search/current/configuration.html
+#
+
+## --------------------- REQUIRED ---------------------
+
+# Encryption keys to protect application secrets.
+secret_management.encryption_keys:
+ # example:
+ #- 680f94e568c90364bedf927b2f0f49609702d3eab9098688585a375b14274546
+
+## ----------------------------------------------------
+
+# IP address Enterprise Search listens on
+ent_search.listen_host: 0.0.0.0
+
+# URL at which users reach Enterprise Search / Kibana
+ent_search.external_url: http://localhost:3002
+kibana.host: http://localhost:5601
+
+# Elasticsearch URL and credentials
+elasticsearch.host: http://elasticsearch:9200
+elasticsearch.username: elastic
+elasticsearch.password: ${ELASTIC_PASSWORD}
+
+# Allow Enterprise Search to modify Elasticsearch settings. Used to enable auto-creation of Elasticsearch indexes.
+allow_es_settings_modification: true
diff --git a/extensions/enterprise-search/enterprise-search-compose.yml b/extensions/enterprise-search/enterprise-search-compose.yml
new file mode 100644
index 00000000..585dda93
--- /dev/null
+++ b/extensions/enterprise-search/enterprise-search-compose.yml
@@ -0,0 +1,20 @@
+version: '3.7'
+
+services:
+ enterprise-search:
+ build:
+ context: extensions/enterprise-search/
+ args:
+ ELASTIC_VERSION: ${ELASTIC_VERSION}
+ volumes:
+ - ./extensions/enterprise-search/config/enterprise-search.yml:/usr/share/enterprise-search/config/enterprise-search.yml:ro,Z
+ environment:
+ JAVA_OPTS: -Xms2g -Xmx2g
+ ENT_SEARCH_DEFAULT_PASSWORD: 'changeme'
+ ELASTIC_PASSWORD: ${ELASTIC_PASSWORD:-}
+ ports:
+ - 3002:3002
+ networks:
+      - demo
+ depends_on:
+ - elasticsearch
diff --git a/extensions/filebeat/.dockerignore b/extensions/filebeat/.dockerignore
new file mode 100644
index 00000000..37eef9d5
--- /dev/null
+++ b/extensions/filebeat/.dockerignore
@@ -0,0 +1,6 @@
+# Ignore Docker build files
+Dockerfile
+.dockerignore
+
+# Ignore OS artifacts
+**/.DS_Store
diff --git a/extensions/filebeat/Dockerfile b/extensions/filebeat/Dockerfile
new file mode 100644
index 00000000..b8dd5f3f
--- /dev/null
+++ b/extensions/filebeat/Dockerfile
@@ -0,0 +1,3 @@
+ARG ELASTIC_VERSION
+
+FROM docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}
diff --git a/extensions/filebeat/README.md b/extensions/filebeat/README.md
new file mode 100644
index 00000000..f2bfd206
--- /dev/null
+++ b/extensions/filebeat/README.md
@@ -0,0 +1,42 @@
+# Filebeat
+
+Filebeat is a lightweight shipper for forwarding and centralizing log data. Installed as an agent on your servers,
+Filebeat monitors the log files or locations that you specify, collects log events, and forwards them either to
+Elasticsearch or Logstash for indexing.
+
+## Usage
+
+**This extension requires the `filebeat_internal` and `beats_system` users to be created and initialized with a
+password.** In case you haven't done that during the initial startup of the stack, please refer to [How to re-execute
+the setup][setup] to run the setup container again and initialize these users.
+
+To include Filebeat in the stack, run Docker Compose from the root of the repository with an additional command line
+argument referencing the `filebeat-compose.yml` file:
+
+```console
+$ docker-compose -f docker-compose.yml -f extensions/filebeat/filebeat-compose.yml up
+```
+
+## Configuring Filebeat
+
+The Filebeat configuration is stored in [`config/filebeat.yml`](./config/filebeat.yml). You can modify this file with
+the help of the [Configuration reference][filebeat-config].
+
+Any change to the Filebeat configuration requires a restart of the Filebeat container:
+
+```console
+$ docker-compose -f docker-compose.yml -f extensions/filebeat/filebeat-compose.yml restart filebeat
+```
+
+Please refer to the following documentation page for more details about how to configure Filebeat inside a Docker
+container: [Run Filebeat on Docker][filebeat-docker].
+
+## See also
+
+[Filebeat documentation][filebeat-doc]
+
+[filebeat-config]: https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-reference-yml.html
+[filebeat-docker]: https://www.elastic.co/guide/en/beats/filebeat/current/running-on-docker.html
+[filebeat-doc]: https://www.elastic.co/guide/en/beats/filebeat/current/index.html
+
+[setup]: ../../README.md#how-to-re-execute-the-setup
diff --git a/extensions/filebeat/config/filebeat.yml b/extensions/filebeat/config/filebeat.yml
new file mode 100644
index 00000000..da8e2ea3
--- /dev/null
+++ b/extensions/filebeat/config/filebeat.yml
@@ -0,0 +1,39 @@
+## Filebeat configuration
+## https://github.com/elastic/beats/blob/main/deploy/docker/filebeat.docker.yml
+#
+
+name: filebeat
+
+filebeat.config:
+ modules:
+ path: ${path.config}/modules.d/*.yml
+ reload.enabled: false
+
+filebeat.autodiscover:
+ providers:
+ # The Docker autodiscover provider automatically retrieves logs from Docker
+ # containers as they start and stop.
+ - type: docker
+ hints.enabled: true
+
+processors:
+ - add_cloud_metadata: ~
+
+monitoring:
+ enabled: true
+ elasticsearch:
+ username: beats_system
+ password: ${BEATS_SYSTEM_PASSWORD}
+
+output.elasticsearch:
+ hosts: [ http://elasticsearch:9200 ]
+ username: filebeat_internal
+ password: ${FILEBEAT_INTERNAL_PASSWORD}
+
+## HTTP endpoint for health checking
+## https://www.elastic.co/guide/en/beats/filebeat/current/http-endpoint.html
+#
+
+http:
+ enabled: true
+ host: 0.0.0.0
diff --git a/extensions/filebeat/filebeat-compose.yml b/extensions/filebeat/filebeat-compose.yml
new file mode 100644
index 00000000..5c5960ef
--- /dev/null
+++ b/extensions/filebeat/filebeat-compose.yml
@@ -0,0 +1,35 @@
+version: '3.7'
+
+services:
+ filebeat:
+ build:
+ context: extensions/filebeat/
+ args:
+ ELASTIC_VERSION: ${ELASTIC_VERSION}
+ # Run as 'root' instead of 'filebeat' (uid 1000) to allow reading
+ # 'docker.sock' and the host's filesystem.
+ user: root
+ command:
+ # Log to stderr.
+ - -e
+ # Disable config file permissions checks. Allows mounting
+ # 'config/filebeat.yml' even if it's not owned by root.
+ # see: https://www.elastic.co/guide/en/beats/libbeat/current/config-file-permissions.html
+ - --strict.perms=false
+ volumes:
+ - ./extensions/filebeat/config/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro,Z
+ - type: bind
+ source: /var/lib/docker/containers
+ target: /var/lib/docker/containers
+ read_only: true
+ - type: bind
+ source: /var/run/docker.sock
+ target: /var/run/docker.sock
+ read_only: true
+ environment:
+ FILEBEAT_INTERNAL_PASSWORD: ${FILEBEAT_INTERNAL_PASSWORD:-}
+ BEATS_SYSTEM_PASSWORD: ${BEATS_SYSTEM_PASSWORD:-}
+ networks:
+      - demo
+ depends_on:
+ - elasticsearch
diff --git a/extensions/fleet/.dockerignore b/extensions/fleet/.dockerignore
new file mode 100644
index 00000000..37eef9d5
--- /dev/null
+++ b/extensions/fleet/.dockerignore
@@ -0,0 +1,6 @@
+# Ignore Docker build files
+Dockerfile
+.dockerignore
+
+# Ignore OS artifacts
+**/.DS_Store
diff --git a/extensions/fleet/Dockerfile b/extensions/fleet/Dockerfile
new file mode 100644
index 00000000..0b5a691d
--- /dev/null
+++ b/extensions/fleet/Dockerfile
@@ -0,0 +1,8 @@
+ARG ELASTIC_VERSION
+
+FROM docker.elastic.co/beats/elastic-agent:${ELASTIC_VERSION}
+
+# Ensure the 'state' directory exists and is owned by the 'elastic-agent' user,
+# otherwise mounting a named volume in that location creates a directory owned
+# by root:root which the 'elastic-agent' user isn't allowed to write to.
+RUN mkdir state
diff --git a/extensions/fleet/README.md b/extensions/fleet/README.md
new file mode 100644
index 00000000..de800857
--- /dev/null
+++ b/extensions/fleet/README.md
@@ -0,0 +1,69 @@
+# Fleet Server
+
+> **Warning**
+> This extension currently exists for preview purposes and should be considered **EXPERIMENTAL**. Expect regular changes
+> to the default Fleet settings, both in the Elastic Agent and Kibana.
+>
+> See [Known Issues](#known-issues) for a list of issues that need to be addressed before this extension can be
+> considered functional.
+
+Fleet provides central management capabilities for [Elastic Agents][fleet-doc] via an API and web UI served by Kibana,
+with Elasticsearch acting as the communication layer.
+Fleet Server is the central component which allows connecting Elastic Agents to the Fleet.
+
+## Requirements
+
+The Fleet Server exposes the TCP port `8220` for Agent to Server communications.
+
+## Usage
+
+To include Fleet Server in the stack, run Docker Compose from the root of the repository with an additional command line
+argument referencing the `fleet-compose.yml` file:
+
+```console
+$ docker-compose -f docker-compose.yml -f extensions/fleet/fleet-compose.yml up
+```
+
+## Configuring Fleet Server
+
+Fleet Server — like any Elastic Agent — is configured via [Agent Policies][fleet-pol] which can be either managed
+through the Fleet management UI in Kibana, or statically pre-configured inside the Kibana configuration file.
+
+To ease the enrollment of Fleet Server in this extension, docker-elk comes with a pre-configured Agent Policy for Fleet
+Server defined inside [`kibana/config/kibana.yml`][config-kbn].
+
+Please refer to the following documentation page for more details about configuring Fleet Server through the Fleet
+management UI: [Fleet UI Settings][fleet-cfg].
+
+## Known Issues
+
+- Logs and metrics are only collected within the Fleet Server's container. Ultimately, we want to emulate the behaviour
+ of the existing Metricbeat and Filebeat extensions, and collect logs and metrics from all ELK containers
+ out-of-the-box. Unfortunately, this kind of use-case isn't (yet) well supported by Fleet, and most advanced
+ configurations currently require running Elastic Agents in [standalone mode][fleet-standalone].
+ (Relevant resource: [Migrate from Beats to Elastic Agent][fleet-beats])
+- The Elastic Agent auto-enrolls using the `elastic` super-user. With this approach, you do not need to generate a
+ service token, either in the Fleet management UI or with the [CLI utility][es-svc-token], prior to starting this
+ extension. Convenient as that is, this approach _does not follow security best practices_, and we recommend
+ generating a service token for Fleet Server instead (see the example below).
+
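+For reference, a service token for Fleet Server can be created with the CLI utility inside the Elasticsearch
+container (a sketch; the token name `fleet-server-token` is arbitrary):
+
+```console
+$ docker-compose exec elasticsearch bin/elasticsearch-service-tokens create elastic/fleet-server fleet-server-token
+```
+
+The resulting token can then be passed to Fleet Server through the `FLEET_SERVER_SERVICE_TOKEN` variable in
+[`fleet-compose.yml`](./fleet-compose.yml).
+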
+## See also
+
+[Fleet and Elastic Agent Guide][fleet-doc]
+
+[fleet-doc]: https://www.elastic.co/guide/en/fleet/current/fleet-overview.html
+[fleet-pol]: https://www.elastic.co/guide/en/fleet/current/agent-policy.html
+[fleet-cfg]: https://www.elastic.co/guide/en/fleet/current/fleet-settings.html
+
+[config-kbn]: ../../kibana/config/kibana.yml
+
+[fleet-standalone]: https://www.elastic.co/guide/en/fleet/current/elastic-agent-configuration.html
+[fleet-beats]: https://www.elastic.co/guide/en/fleet/current/migrate-beats-to-agent.html
+[es-svc-token]: https://www.elastic.co/guide/en/elasticsearch/reference/current/service-tokens-command.html
diff --git a/extensions/fleet/agent-apmserver-compose.yml b/extensions/fleet/agent-apmserver-compose.yml
new file mode 100644
index 00000000..06e201a9
--- /dev/null
+++ b/extensions/fleet/agent-apmserver-compose.yml
@@ -0,0 +1,45 @@
+version: '3.7'
+
+# Example of Fleet-enrolled Elastic Agent pre-configured with an agent policy
+# for running the APM Server integration (see kibana.yml).
+#
+# Run with
+# docker-compose \
+# -f docker-compose.yml \
+# -f extensions/fleet/fleet-compose.yml \
+# -f extensions/fleet/agent-apmserver-compose.yml \
+# up
+
+services:
+ apm-server:
+ build:
+ context: extensions/fleet/
+ args:
+ ELASTIC_VERSION: ${ELASTIC_VERSION}
+ volumes:
+ - apm-server:/usr/share/elastic-agent/state:Z
+ environment:
+ FLEET_ENROLL: '1'
+ FLEET_TOKEN_POLICY_NAME: Agent Policy APM Server
+ FLEET_INSECURE: '1'
+ FLEET_URL: http://fleet-server:8220
+ # Enrollment.
+ # (a) Auto-enroll using basic authentication
+ ELASTICSEARCH_USERNAME: elastic
+ ELASTICSEARCH_PASSWORD: ${ELASTIC_PASSWORD:-}
+ # (b) Enroll using a pre-generated enrollment token
+ #FLEET_ENROLLMENT_TOKEN:
+ ports:
+ - 8200:8200
+ hostname: apm-server
+ # Elastic Agent does not retry failed connections to Kibana during the initial enrollment phase.
+ restart: on-failure
+ networks:
+      - demo
+ depends_on:
+ - elasticsearch
+ - kibana
+ - fleet-server
+
+volumes:
+ apm-server:
diff --git a/extensions/fleet/fleet-compose.yml b/extensions/fleet/fleet-compose.yml
new file mode 100644
index 00000000..e33f47b0
--- /dev/null
+++ b/extensions/fleet/fleet-compose.yml
@@ -0,0 +1,36 @@
+version: '3.7'
+
+services:
+ fleet-server:
+ build:
+ context: extensions/fleet/
+ args:
+ ELASTIC_VERSION: ${ELASTIC_VERSION}
+ volumes:
+ - fleet-server:/usr/share/elastic-agent/state:Z
+ environment:
+ FLEET_SERVER_ENABLE: '1'
+ FLEET_SERVER_INSECURE_HTTP: '1'
+ FLEET_SERVER_HOST: 0.0.0.0
+ FLEET_SERVER_POLICY_ID: fleet-server-policy
+ # Fleet plugin in Kibana
+ KIBANA_FLEET_SETUP: '1'
+ # Enrollment.
+ # (a) Auto-enroll using basic authentication
+ ELASTICSEARCH_USERNAME: elastic
+ ELASTICSEARCH_PASSWORD: ${ELASTIC_PASSWORD:-}
+ # (b) Enroll using a pre-generated service token
+ #FLEET_SERVER_SERVICE_TOKEN:
+ ports:
+ - 8220:8220
+ hostname: fleet-server
+ # Elastic Agent does not retry failed connections to Kibana during the initial enrollment phase.
+ restart: on-failure
+ networks:
+      - demo
+ depends_on:
+ - elasticsearch
+ - kibana
+
+volumes:
+ fleet-server:
diff --git a/extensions/heartbeat/.dockerignore b/extensions/heartbeat/.dockerignore
new file mode 100644
index 00000000..37eef9d5
--- /dev/null
+++ b/extensions/heartbeat/.dockerignore
@@ -0,0 +1,6 @@
+# Ignore Docker build files
+Dockerfile
+.dockerignore
+
+# Ignore OS artifacts
+**/.DS_Store
diff --git a/extensions/heartbeat/Dockerfile b/extensions/heartbeat/Dockerfile
new file mode 100644
index 00000000..0d7de196
--- /dev/null
+++ b/extensions/heartbeat/Dockerfile
@@ -0,0 +1,3 @@
+ARG ELASTIC_VERSION
+
+FROM docker.elastic.co/beats/heartbeat:${ELASTIC_VERSION}
diff --git a/extensions/heartbeat/README.md b/extensions/heartbeat/README.md
new file mode 100644
index 00000000..82c938f5
--- /dev/null
+++ b/extensions/heartbeat/README.md
@@ -0,0 +1,41 @@
+# Heartbeat
+
+Heartbeat is a lightweight daemon that periodically checks the status of your services and determines whether they are
+available.
+
+## Usage
+
+**This extension requires the `heartbeat_internal` and `beats_system` users to be created and initialized with a
+password.** In case you haven't done that during the initial startup of the stack, please refer to [How to re-execute
+the setup][setup] to run the setup container again and initialize these users.
+
+To include Heartbeat in the stack, run Docker Compose from the root of the repository with an additional command line
+argument referencing the `heartbeat-compose.yml` file:
+
+```console
+$ docker-compose -f docker-compose.yml -f extensions/heartbeat/heartbeat-compose.yml up
+```
+
+## Configuring Heartbeat
+
+The Heartbeat configuration is stored in [`config/heartbeat.yml`](./config/heartbeat.yml). You can modify this file
+with the help of the [Configuration reference][heartbeat-config].
+
+Any change to the Heartbeat configuration requires a restart of the Heartbeat container:
+
+```console
+$ docker-compose -f docker-compose.yml -f extensions/heartbeat/heartbeat-compose.yml restart heartbeat
+```
+
+Please refer to the following documentation page for more details about how to configure Heartbeat inside a
+Docker container: [Run Heartbeat on Docker][heartbeat-docker].
+
+## See also
+
+[Heartbeat documentation][heartbeat-doc]
+
+[heartbeat-config]: https://www.elastic.co/guide/en/beats/heartbeat/current/heartbeat-reference-yml.html
+[heartbeat-docker]: https://www.elastic.co/guide/en/beats/heartbeat/current/running-on-docker.html
+[heartbeat-doc]: https://www.elastic.co/guide/en/beats/heartbeat/current/index.html
+
+[setup]: ../../README.md#how-to-re-execute-the-setup
diff --git a/extensions/heartbeat/config/heartbeat.yml b/extensions/heartbeat/config/heartbeat.yml
new file mode 100644
index 00000000..b1416ea4
--- /dev/null
+++ b/extensions/heartbeat/config/heartbeat.yml
@@ -0,0 +1,40 @@
+## Heartbeat configuration
+## https://github.com/elastic/beats/blob/main/deploy/docker/heartbeat.docker.yml
+#
+
+name: heartbeat
+
+heartbeat.monitors:
+- type: http
+ schedule: '@every 5s'
+ urls:
+ - http://elasticsearch:9200
+ username: heartbeat_internal
+ password: ${HEARTBEAT_INTERNAL_PASSWORD}
+
+- type: icmp
+ schedule: '@every 5s'
+ hosts:
+ - elasticsearch
+
+processors:
+- add_cloud_metadata: ~
+
+monitoring:
+ enabled: true
+ elasticsearch:
+ username: beats_system
+ password: ${BEATS_SYSTEM_PASSWORD}
+
+output.elasticsearch:
+ hosts: [ http://elasticsearch:9200 ]
+ username: heartbeat_internal
+ password: ${HEARTBEAT_INTERNAL_PASSWORD}
+
+## HTTP endpoint for health checking
+## https://www.elastic.co/guide/en/beats/heartbeat/current/http-endpoint.html
+#
+
+http:
+ enabled: true
+ host: 0.0.0.0
diff --git a/extensions/heartbeat/heartbeat-compose.yml b/extensions/heartbeat/heartbeat-compose.yml
new file mode 100644
index 00000000..47e07084
--- /dev/null
+++ b/extensions/heartbeat/heartbeat-compose.yml
@@ -0,0 +1,24 @@
+version: '3.7'
+
+services:
+ heartbeat:
+ build:
+ context: extensions/heartbeat/
+ args:
+ ELASTIC_VERSION: ${ELASTIC_VERSION}
+ command:
+ # Log to stderr.
+ - -e
+ # Disable config file permissions checks. Allows mounting
+ # 'config/heartbeat.yml' even if it's not owned by root.
+ # see: https://www.elastic.co/guide/en/beats/libbeat/current/config-file-permissions.html
+ - --strict.perms=false
+ volumes:
+ - ./extensions/heartbeat/config/heartbeat.yml:/usr/share/heartbeat/heartbeat.yml:ro,Z
+ environment:
+ HEARTBEAT_INTERNAL_PASSWORD: ${HEARTBEAT_INTERNAL_PASSWORD:-}
+ BEATS_SYSTEM_PASSWORD: ${BEATS_SYSTEM_PASSWORD:-}
+ networks:
+      - demo
+ depends_on:
+ - elasticsearch
diff --git a/extensions/logspout/.dockerignore b/extensions/logspout/.dockerignore
new file mode 100644
index 00000000..37eef9d5
--- /dev/null
+++ b/extensions/logspout/.dockerignore
@@ -0,0 +1,6 @@
+# Ignore Docker build files
+Dockerfile
+.dockerignore
+
+# Ignore OS artifacts
+**/.DS_Store
diff --git a/extensions/logspout/Dockerfile b/extensions/logspout/Dockerfile
new file mode 100644
index 00000000..9591df53
--- /dev/null
+++ b/extensions/logspout/Dockerfile
@@ -0,0 +1,5 @@
+# uses ONBUILD instructions described here:
+# https://github.com/gliderlabs/logspout/tree/master/custom
+
+FROM gliderlabs/logspout:master
+ENV SYSLOG_FORMAT rfc3164
diff --git a/extensions/logspout/README.md b/extensions/logspout/README.md
new file mode 100644
index 00000000..2e346485
--- /dev/null
+++ b/extensions/logspout/README.md
@@ -0,0 +1,28 @@
+# Logspout extension
+
+Logspout collects all Docker logs using the Docker logs API, and forwards them to Logstash without any additional
+configuration.
+
+## Usage
+
+If you want to include the Logspout extension, run Docker Compose from the root of the repository with an additional
+command line argument referencing the `logspout-compose.yml` file:
+
+```bash
+$ docker-compose -f docker-compose.yml -f extensions/logspout/logspout-compose.yml up
+```
+
+In your Logstash pipeline configuration, enable the `udp` input and set the input codec to `json`:
+
+```logstash
+input {
+ udp {
+ port => 50000
+ codec => json
+ }
+}
+```
+
+## Documentation
+
+[Logspout](https://github.com/gliderlabs/logspout)
+
+[logspout-logstash adapter](https://github.com/looplab/logspout-logstash)
diff --git a/extensions/logspout/build.sh b/extensions/logspout/build.sh
new file mode 100644
index 00000000..c3ff9388
--- /dev/null
+++ b/extensions/logspout/build.sh
@@ -0,0 +1,13 @@
+#!/bin/sh
+
+# source: https://github.com/gliderlabs/logspout/blob/621524e/custom/build.sh
+
+set -e
+apk add --update go build-base git mercurial ca-certificates
+cd /src
+go build -ldflags "-X main.Version=$1" -o /bin/logspout
+apk del go git mercurial build-base
+rm -rf /root/go /var/cache/apk/*
+
+# backwards compatibility
+ln -fs /tmp/docker.sock /var/run/docker.sock
diff --git a/extensions/logspout/logspout-compose.yml b/extensions/logspout/logspout-compose.yml
new file mode 100644
index 00000000..8af149df
--- /dev/null
+++ b/extensions/logspout/logspout-compose.yml
@@ -0,0 +1,19 @@
+version: '3.7'
+
+services:
+ logspout:
+ build:
+ context: extensions/logspout
+ volumes:
+ - type: bind
+ source: /var/run/docker.sock
+ target: /var/run/docker.sock
+ read_only: true
+ environment:
+ ROUTE_URIS: logstash://logstash:50000
+ LOGSTASH_TAGS: docker-elk
+ networks:
+      - demo
+ depends_on:
+ - logstash
+ restart: on-failure
diff --git a/extensions/logspout/modules.go b/extensions/logspout/modules.go
new file mode 100644
index 00000000..f1a22586
--- /dev/null
+++ b/extensions/logspout/modules.go
@@ -0,0 +1,10 @@
+package main
+
+// installs the Logstash adapter for Logspout, and required dependencies
+// https://github.com/looplab/logspout-logstash
+import (
+ _ "github.com/gliderlabs/logspout/healthcheck"
+ _ "github.com/gliderlabs/logspout/transports/tcp"
+ _ "github.com/gliderlabs/logspout/transports/udp"
+ _ "github.com/looplab/logspout-logstash"
+)
diff --git a/extensions/metricbeat/.dockerignore b/extensions/metricbeat/.dockerignore
new file mode 100644
index 00000000..37eef9d5
--- /dev/null
+++ b/extensions/metricbeat/.dockerignore
@@ -0,0 +1,6 @@
+# Ignore Docker build files
+Dockerfile
+.dockerignore
+
+# Ignore OS artifacts
+**/.DS_Store
diff --git a/extensions/metricbeat/Dockerfile b/extensions/metricbeat/Dockerfile
new file mode 100644
index 00000000..6d05bf55
--- /dev/null
+++ b/extensions/metricbeat/Dockerfile
@@ -0,0 +1,3 @@
+ARG ELASTIC_VERSION
+
+FROM docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}
diff --git a/extensions/metricbeat/README.md b/extensions/metricbeat/README.md
new file mode 100644
index 00000000..1da1eaa2
--- /dev/null
+++ b/extensions/metricbeat/README.md
@@ -0,0 +1,49 @@
+# Metricbeat
+
+Metricbeat is a lightweight shipper that you can install on your servers to periodically collect metrics from the
+operating system and from services running on the server. Metricbeat takes the metrics and statistics that it collects
+and ships them to the output that you specify, such as Elasticsearch or Logstash.
+
+## Usage
+
+**This extension requires the `metricbeat_internal`, `monitoring_internal` and `beats_system` users to be created and
+initialized with a password.** In case you haven't done that during the initial startup of the stack, please refer to
+[How to re-execute the setup][setup] to run the setup container again and initialize these users.
+
+To include Metricbeat in the stack, run Docker Compose from the root of the repository with an additional command line
+argument referencing the `metricbeat-compose.yml` file:
+
+```console
+$ docker-compose -f docker-compose.yml -f extensions/metricbeat/metricbeat-compose.yml up
+```
+
+## Configuring Metricbeat
+
+The Metricbeat configuration is stored in [`config/metricbeat.yml`](./config/metricbeat.yml). You can modify this file
+with the help of the [Configuration reference][metricbeat-config].
+
+Any change to the Metricbeat configuration requires a restart of the Metricbeat container:
+
+```console
+$ docker-compose -f docker-compose.yml -f extensions/metricbeat/metricbeat-compose.yml restart metricbeat
+```
+
+Please refer to the following documentation page for more details about how to configure Metricbeat inside a
+Docker container: [Run Metricbeat on Docker][metricbeat-docker].
+
+## See also
+
+[Metricbeat documentation][metricbeat-doc]
+
+[metricbeat-config]: https://www.elastic.co/guide/en/beats/metricbeat/current/metricbeat-reference-yml.html
+[metricbeat-docker]: https://www.elastic.co/guide/en/beats/metricbeat/current/running-on-docker.html
+[metricbeat-doc]: https://www.elastic.co/guide/en/beats/metricbeat/current/index.html
+
+[setup]: ../../README.md#how-to-re-execute-the-setup
diff --git a/extensions/metricbeat/config/metricbeat.yml b/extensions/metricbeat/config/metricbeat.yml
new file mode 100644
index 00000000..1c2b6cb8
--- /dev/null
+++ b/extensions/metricbeat/config/metricbeat.yml
@@ -0,0 +1,72 @@
+## Metricbeat configuration
+## https://github.com/elastic/beats/blob/main/deploy/docker/metricbeat.docker.yml
+#
+
+name: metricbeat
+
+metricbeat.config:
+ modules:
+ path: ${path.config}/modules.d/*.yml
+ # Reload module configs as they change:
+ reload.enabled: false
+
+metricbeat.autodiscover:
+ providers:
+ - type: docker
+ hints.enabled: true
+
+metricbeat.modules:
+- module: elasticsearch
+ hosts: [ http://elasticsearch:9200 ]
+ username: monitoring_internal
+ password: ${MONITORING_INTERNAL_PASSWORD}
+ xpack.enabled: true
+ period: 10s
+ enabled: true
+- module: logstash
+ hosts: [ http://logstash:9600 ]
+ xpack.enabled: true
+ period: 10s
+ enabled: true
+- module: kibana
+ hosts: [ http://kibana:5601 ]
+ username: monitoring_internal
+ password: ${MONITORING_INTERNAL_PASSWORD}
+ xpack.enabled: true
+ period: 10s
+ enabled: true
+- module: docker
+ metricsets:
+ - container
+ - cpu
+ - diskio
+ - healthcheck
+ - info
+ #- image
+ - memory
+ - network
+ hosts: [ unix:///var/run/docker.sock ]
+ period: 10s
+ enabled: true
+
+processors:
+ - add_cloud_metadata: ~
+
+monitoring:
+ enabled: true
+ elasticsearch:
+ username: beats_system
+ password: ${BEATS_SYSTEM_PASSWORD}
+
+output.elasticsearch:
+ hosts: [ http://elasticsearch:9200 ]
+ username: metricbeat_internal
+ password: ${METRICBEAT_INTERNAL_PASSWORD}
+
+## HTTP endpoint for health checking
+## https://www.elastic.co/guide/en/beats/metricbeat/current/http-endpoint.html
+#
+
+http:
+ enabled: true
+ host: 0.0.0.0
diff --git a/extensions/metricbeat/metricbeat-compose.yml b/extensions/metricbeat/metricbeat-compose.yml
new file mode 100644
index 00000000..5b37a66c
--- /dev/null
+++ b/extensions/metricbeat/metricbeat-compose.yml
@@ -0,0 +1,47 @@
+version: '3.7'
+
+services:
+ metricbeat:
+ build:
+ context: extensions/metricbeat/
+ args:
+ ELASTIC_VERSION: ${ELASTIC_VERSION}
+ # Run as 'root' instead of 'metricbeat' (uid 1000) to allow reading
+ # 'docker.sock' and the host's filesystem.
+ user: root
+ command:
+ # Log to stderr.
+ - -e
+ # Disable config file permissions checks. Allows mounting
+ # 'config/metricbeat.yml' even if it's not owned by root.
+ # see: https://www.elastic.co/guide/en/beats/libbeat/current/config-file-permissions.html
+ - --strict.perms=false
+ # Mount point of the host’s filesystem. Required to monitor the host
+ # from within a container.
+ - --system.hostfs=/hostfs
+ volumes:
+ - ./extensions/metricbeat/config/metricbeat.yml:/usr/share/metricbeat/metricbeat.yml:ro,Z
+ - type: bind
+ source: /
+ target: /hostfs
+ read_only: true
+ - type: bind
+ source: /sys/fs/cgroup
+ target: /hostfs/sys/fs/cgroup
+ read_only: true
+ - type: bind
+ source: /proc
+ target: /hostfs/proc
+ read_only: true
+ - type: bind
+ source: /var/run/docker.sock
+ target: /var/run/docker.sock
+ read_only: true
+ environment:
+ METRICBEAT_INTERNAL_PASSWORD: ${METRICBEAT_INTERNAL_PASSWORD:-}
+ MONITORING_INTERNAL_PASSWORD: ${MONITORING_INTERNAL_PASSWORD:-}
+ BEATS_SYSTEM_PASSWORD: ${BEATS_SYSTEM_PASSWORD:-}
+ networks:
+      - demo
+ depends_on:
+ - elasticsearch
diff --git a/host/YANLib.HttpApi.Host/Program.cs b/host/YANLib.HttpApi.Host/Program.cs
index 6e5dd7d7..eb32b152 100644
--- a/host/YANLib.HttpApi.Host/Program.cs
+++ b/host/YANLib.HttpApi.Host/Program.cs
@@ -1,10 +1,11 @@
-using System;
-using System.Threading.Tasks;
-using Microsoft.AspNetCore.Builder;
+using Microsoft.AspNetCore.Builder;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Hosting;
using Serilog;
using Serilog.Events;
+using System;
+using System.Threading.Tasks;
+using static System.DateTime;
namespace YANLib;
@@ -21,7 +22,7 @@ public async static Task Main(string[] args)
.MinimumLevel.Override("Microsoft", LogEventLevel.Information)
.MinimumLevel.Override("Microsoft.EntityFrameworkCore", LogEventLevel.Warning)
.Enrich.FromLogContext()
- .WriteTo.Async(c => c.File("Logs/logs.txt"))
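+ // Date-stamped file name, so a new log file is started each day.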
+ .WriteTo.Async(c => c.File($"Logs/{Now:yyyy-MM-dd}.log"))
.WriteTo.Async(c => c.Console())
.CreateLogger();
diff --git a/kibana/.dockerignore b/kibana/.dockerignore
new file mode 100644
index 00000000..37eef9d5
--- /dev/null
+++ b/kibana/.dockerignore
@@ -0,0 +1,6 @@
+# Ignore Docker build files
+Dockerfile
+.dockerignore
+
+# Ignore OS artifacts
+**/.DS_Store
diff --git a/kibana/Dockerfile b/kibana/Dockerfile
new file mode 100644
index 00000000..9a075bed
--- /dev/null
+++ b/kibana/Dockerfile
@@ -0,0 +1,7 @@
+ARG ELASTIC_VERSION
+
+# https://www.docker.elastic.co/
+FROM docker.elastic.co/kibana/kibana:${ELASTIC_VERSION}
+
+# Add your kibana plugins setup here
+# Example: RUN kibana-plugin install
diff --git a/kibana/config/kibana.yml b/kibana/config/kibana.yml
new file mode 100644
index 00000000..9d4e79ab
--- /dev/null
+++ b/kibana/config/kibana.yml
@@ -0,0 +1,94 @@
+---
+## Default Kibana configuration from Kibana base image.
+## https://github.com/elastic/kibana/blob/main/src/dev/build/tasks/os_packages/docker_generator/templates/kibana_yml.template.ts
+#
+server.name: kibana
+server.host: 0.0.0.0
+elasticsearch.hosts: [ http://elasticsearch:9200 ]
+
+monitoring.ui.container.elasticsearch.enabled: true
+monitoring.ui.container.logstash.enabled: true
+
+## X-Pack security credentials
+#
+elasticsearch.username: kibana_system
+elasticsearch.password: ${KIBANA_SYSTEM_PASSWORD}
+
+## Encryption keys (optional but highly recommended)
+##
+## Generate with either
+## $ docker container run --rm docker.elastic.co/kibana/kibana:8.6.2 bin/kibana-encryption-keys generate
+## $ openssl rand -hex 32
+##
+## https://www.elastic.co/guide/en/kibana/current/using-kibana-with-security.html
+## https://www.elastic.co/guide/en/kibana/current/kibana-encryption-keys.html
+#
+#xpack.security.encryptionKey:
+#xpack.encryptedSavedObjects.encryptionKey:
+#xpack.reporting.encryptionKey:
+
+## Fleet
+## https://www.elastic.co/guide/en/kibana/current/fleet-settings-kb.html
+#
+xpack.fleet.agents.fleet_server.hosts: [ http://fleet-server:8220 ]
+
+xpack.fleet.outputs:
+ - id: fleet-default-output
+ name: default
+ type: elasticsearch
+ hosts: [ http://elasticsearch:9200 ]
+ is_default: true
+ is_default_monitoring: true
+
+xpack.fleet.packages:
+ - name: fleet_server
+ version: latest
+ - name: system
+ version: latest
+ - name: elastic_agent
+ version: latest
+ - name: apm
+ version: latest
+
+xpack.fleet.agentPolicies:
+ - name: Fleet Server Policy
+ id: fleet-server-policy
+ description: Static agent policy for Fleet Server
+ monitoring_enabled:
+ - logs
+ - metrics
+ package_policies:
+ - name: fleet_server-1
+ package:
+ name: fleet_server
+ - name: system-1
+ package:
+ name: system
+ - name: elastic_agent-1
+ package:
+ name: elastic_agent
+ - name: Agent Policy APM Server
+ id: agent-policy-apm-server
+ description: Static agent policy for the APM Server integration
+ monitoring_enabled:
+ - logs
+ - metrics
+ package_policies:
+ - name: system-1
+ package:
+ name: system
+ - name: elastic_agent-1
+ package:
+ name: elastic_agent
+ - name: apm-1
+ package:
+ name: apm
+ # See the APM package manifest for a list of possible inputs.
+ # https://github.com/elastic/apm-server/blob/v8.5.0/apmpackage/apm/manifest.yml#L41-L168
+ inputs:
+ - type: apm
+ vars:
+ - name: host
+ value: 0.0.0.0:8200
+ - name: url
+ value: http://apm-server:8200
diff --git a/lib/YANLib/YANJson.cs b/lib/YANLib/YANJson.cs
index 71be05f8..e5469bee 100644
--- a/lib/YANLib/YANJson.cs
+++ b/lib/YANLib/YANJson.cs
@@ -11,7 +11,7 @@ public static partial class YANJson
/// <typeparam name="T">The type of the object to be serialized. Must be a reference type.</typeparam>
/// <param name="mdl">The object to be serialized.</param>
/// <returns>A JSON string representing the serialized object.</returns>
- public static string SerializePascal<T>(this T mdl) where T : class => JsonSerializer.Serialize(mdl);
+ public static string Serialize<T>(this T mdl) where T : class => JsonSerializer.Serialize(mdl);
/// <summary>
/// Serializes an enumerable of objects of type <typeparamref name="T"/> to an <see cref="IEnumerable{String}"/> containing JSON strings representing the serialized objects.
@@ -20,7 +20,7 @@ public static partial class YANJson
/// <typeparam name="T">The type of the objects to be serialized. Must be a reference type.</typeparam>
/// <param name="mdls">The enumerable of objects to be serialized.</param>
/// <returns>An <see cref="IEnumerable{String}"/> containing JSON strings representing the serialized objects.</returns>
- public static IEnumerable<string> SerializePascal<T>(params T[] mdls) where T : class
+ public static IEnumerable<string> Serialize<T>(params T[] mdls) where T : class
{
if (mdls is null || mdls.Length <= 0)
{
@@ -28,7 +28,7 @@ public static IEnumerable<string> SerializePascal<T>(params T[] mdls) where T :
}
for (var i = 0; i < mdls.Length; i++)
{
- yield return mdls[i].SerializePascal();
+ yield return JsonSerializer.Serialize(mdls[i]);
}
}
@@ -39,7 +39,7 @@ public static IEnumerable<string> SerializePascal<T>(params T[] mdls) where T :
/// <typeparam name="T">The type of the objects to be serialized. Must be a reference type.</typeparam>
/// <param name="mdls">The enumerable of objects to be serialized.</param>
/// <returns>An <see cref="IEnumerable{String}"/> containing JSON strings representing the serialized objects.</returns>
- public static IEnumerable<string> SerializePascal<T>(this IEnumerable<T> mdls) where T : class
+ public static IEnumerable<string> Serialize<T>(this IEnumerable<T> mdls) where T : class
{
if (mdls is null || !mdls.Any())
{
@@ -47,7 +47,7 @@ public static IEnumerable<string> SerializePascal<T>(this IEnumerable<T> mdls) w
}
foreach (var mdl in mdls)
{
- yield return mdl.SerializePascal();
+ yield return JsonSerializer.Serialize(mdl);
}
}
@@ -58,7 +58,7 @@ public static IEnumerable<string> SerializePascal<T>(this IEnumerable<T> mdls) w
/// <typeparam name="T">The type of the objects to be serialized. Must be a reference type.</typeparam>
/// <param name="mdls">The enumerable of objects to be serialized.</param>
/// <returns>An <see cref="IEnumerable{String}"/> containing JSON strings representing the serialized objects.</returns>
- public static IEnumerable<string> SerializePascal<T>(this IReadOnlyCollection<T> mdls) where T : class
+ public static IEnumerable<string> Serialize<T>(this IReadOnlyCollection<T> mdls) where T : class
{
if (mdls is null || mdls.Count <= 0)
{
@@ -66,7 +66,7 @@ public static IEnumerable<string> SerializePascal<T>(this IReadOnlyCollection
}
foreach (var mdl in mdls)
{
- yield return mdl.SerializePascal();
+ yield return JsonSerializer.Serialize(mdl);
}
}
@@ -77,7 +77,7 @@ public static IEnumerable<string> SerializePascal<T>(this IReadOnlyCollection
/// <typeparam name="T">The type of the objects to be serialized. Must be a reference type.</typeparam>
/// <param name="mdls">The enumerable of objects to be serialized.</param>
/// <returns>An <see cref="IEnumerable{String}"/> containing JSON strings representing the serialized objects.</returns>
- public static IEnumerable<string> SerializePascal<T>(this IReadOnlyList<T> mdls) where T : class
+ public static IEnumerable<string> Serialize<T>(this IReadOnlyList<T> mdls) where T : class
{
if (mdls is null || mdls.Count <= 0)
{
@@ -85,7 +85,7 @@ public static IEnumerable<string> SerializePascal<T>(this IReadOnlyList<T> mdls)
}
for (var i = 0; i < mdls.Count; i++)
{
- yield return mdls[i].SerializePascal();
+ yield return JsonSerializer.Serialize(mdls[i]);
}
}
@@ -96,7 +96,7 @@ public static IEnumerable<string> SerializePascal<T>(this IReadOnlyList<T> mdls)
/// <typeparam name="T">The type of the objects to be serialized. Must be a reference type.</typeparam>
/// <param name="mdls">The enumerable of objects to be serialized.</param>
/// <returns>An <see cref="IEnumerable{String}"/> containing JSON strings representing the serialized objects.</returns>
- public static IEnumerable<string> SerializePascal<T>(this IReadOnlySet<T> mdls) where T : class
+ public static IEnumerable<string> Serialize<T>(this IReadOnlySet<T> mdls) where T : class
{
if (mdls is null || mdls.Count <= 0)
{
@@ -104,7 +104,7 @@ public static IEnumerable<string> SerializePascal<T>(this IReadOnlySet<T> mdls)
}
foreach (var mdl in mdls)
{
- yield return mdl.SerializePascal();
+ yield return JsonSerializer.Serialize(mdl);
}
}
@@ -228,7 +228,7 @@ public static IEnumerable<string> SerializeCamel<T>(this IReadOnlySet<T> mdls) w
/// <typeparam name="T">The type of the object to be deserialized. Must be a reference type.</typeparam>
/// <param name="str">The JSON string to be deserialized.</param>
/// <returns>The deserialized object, or <see langword="null"/> if the deserialization fails.</returns>
- public static T? DeserializePascal<T>(this string str) where T : class
+ public static T? Deserialize<T>(this string str) where T : class
{
try
{
@@ -247,7 +247,7 @@ public static IEnumerable<string> SerializeCamel<T>(this IReadOnlySet<T> mdls) w
/// <typeparam name="T">The type of the objects to be deserialized. Must be a reference type.</typeparam>
/// <param name="strs">The array of JSON strings to be deserialized.</param>
/// <returns>An enumerable of deserialized objects, or <see langword="null"/> if the deserialization fails for any of the input strings.</returns>
- public static IEnumerable<T?> DeserializePascal<T>(params string[] strs) where T : class
+ public static IEnumerable<T?> Deserialize<T>(params string[] strs) where T : class
{
if (strs is null || strs.Length <= 0)
{
@@ -255,7 +255,7 @@ public static IEnumerable<string> SerializeCamel<T>(this IReadOnlySet<T> mdls) w
}
for (var i = 0; i < strs.Length; i++)
{
- yield return strs[i].DeserializePascal<T>();
+ yield return strs[i].Deserialize<T>();
}
}
@@ -266,7 +266,7 @@ public static IEnumerable<string> SerializeCamel<T>(this IReadOnlySet<T> mdls) w
/// <typeparam name="T">The type of the objects to be deserialized. Must be a reference type.</typeparam>
/// <param name="strs">The array of JSON strings to be deserialized.</param>
/// <returns>An enumerable of deserialized objects, or <see langword="null"/> if the deserialization fails for any of the input strings.</returns>
- public static IEnumerable<T?> DeserializePascal<T>(this IEnumerable<string> strs) where T : class
+ public static IEnumerable<T?> Deserialize<T>(this IEnumerable<string> strs) where T : class
{
if (strs is null || !strs.Any())
{
@@ -274,7 +274,7 @@ public static IEnumerable<string> SerializeCamel<T>(this IReadOnlySet<T> mdls) w
}
foreach (var str in strs)
{
- yield return str.DeserializePascal<T>();
+ yield return str.Deserialize<T>();
}
}
@@ -285,7 +285,7 @@ public static IEnumerable<string> SerializeCamel<T>(this IReadOnlySet<T> mdls) w
/// <typeparam name="T">The type of the objects to be deserialized. Must be a reference type.</typeparam>
/// <param name="strs">The array of JSON strings to be deserialized.</param>
/// <returns>An enumerable of deserialized objects, or <see langword="null"/> if the deserialization fails for any of the input strings.</returns>
- public static IEnumerable<T?> DeserializePascal<T>(this IReadOnlyCollection<string> strs) where T : class
+ public static IEnumerable<T?> Deserialize<T>(this IReadOnlyCollection<string> strs) where T : class
{
if (strs is null || strs.Count <= 0)
{
@@ -293,7 +293,7 @@ public static IEnumerable<string> SerializeCamel<T>(this IReadOnlySet<T> mdls) w
}
foreach (var str in strs)
{
- yield return str.DeserializePascal<T>();
+ yield return str.Deserialize<T>();
}
}
@@ -304,7 +304,7 @@ public static IEnumerable<string> SerializeCamel<T>(this IReadOnlySet<T> mdls) where T : class
/// <typeparam name="T">The type of the objects to be deserialized. Must be a reference type.</typeparam>
/// <param name="strs">The array of JSON strings to be deserialized.</param>
/// <returns>An enumerable of deserialized objects, or <see langword="null"/> if the deserialization fails for any of the input strings.</returns>
- public static IEnumerable<T?> DeserializePascal<T>(this IReadOnlyList<string> strs) where T : class
+ public static IEnumerable<T?> Deserialize<T>(this IReadOnlyList<string> strs) where T : class
{
if (strs is null || strs.Count <= 0)
{
@@ -312,7 +312,7 @@ public static IEnumerable<string> SerializeCamel<T>(this IReadOnlySet<T> mdls) where T : class
}
for (var i = 0; i < strs.Count; i++)
{
- yield return strs[i].DeserializePascal<T>();
+ yield return strs[i].Deserialize<T>();
}
}
@@ -323,7 +323,7 @@ public static IEnumerable<string> SerializeCamel<T>(this IReadOnlySet<T> mdls) where T : class
/// <typeparam name="T">The type of the objects to be deserialized. Must be a reference type.</typeparam>
/// <param name="strs">The array of JSON strings to be deserialized.</param>
/// <returns>An enumerable of deserialized objects, or <see langword="null"/> if the deserialization fails for any of the input strings.</returns>
- public static IEnumerable<T?> DeserializePascal<T>(this IReadOnlySet<string> strs) where T : class
+ public static IEnumerable<T?> Deserialize<T>(this IReadOnlySet<string> strs) where T : class
{
if (strs is null || strs.Count <= 0)
{
@@ -331,7 +331,7 @@ public static IEnumerable<string> SerializeCamel<T>(this IReadOnlySet<T> mdls) where T : class
}
foreach (var str in strs)
{
- yield return str.DeserializePascal<T>();
+ yield return str.Deserialize<T>();
}
}
@@ -460,14 +460,284 @@ public static IEnumerable<string> SerializeCamel<T>(this IReadOnlySet<T> mdls) where T : class
/// <typeparam name="T">The type of the object to be deserialized. Must be a reference type.</typeparam>
/// <param name="str">The JSON string to be deserialized.</param>
/// <returns>The deserialized object, or <see langword="null"/> if the deserialization fails.</returns>
- public static T? Deserialize<T>(this string str) where T : class
+ public static T? DeserializeDuo<T>(this string str) where T : class
+ {
+ T? rslt;
+ try
+ {
+ rslt = JsonSerializer.Deserialize<T>(str);
+ }
+ catch
+ {
+ rslt = default;
+ }
+ if (rslt is not null && rslt.AnyPropertiesNotDefault())
+ {
+ return rslt;
+ }
+ else
+ {
+ try
+ {
+ return JsonSerializer.Deserialize<T>(str, new JsonSerializerOptions
+ {
+ PropertyNameCaseInsensitive = false,
+ PropertyNamingPolicy = CamelCase
+ });
+ }
+ catch
+ {
+ return default;
+ }
+ }
+ }
+
+ /// <summary>
+ /// Deserializes an array of JSON strings to an enumerable of objects of type <typeparamref name="T"/> using the default JSON deserialization settings first, then camelCase property names with case-sensitive matching as a fallback.
+ /// Returns an enumerable of deserialized objects, or <see langword="null"/> if the deserialization fails for any of the input strings.
+ /// </summary>
+ /// <typeparam name="T">The type of the objects to be deserialized. Must be a reference type.</typeparam>
+ /// <param name="strs">The array of JSON strings to be deserialized.</param>
+ /// <returns>An enumerable of deserialized objects, or <see langword="null"/> if the deserialization fails for any of the input strings.</returns>
+ public static IEnumerable<T?> DeserializeDuo<T>(params string[] strs) where T : class
+ {
+ if (strs is null || strs.Length <= 0)
+ {
+ yield break;
+ }
+ for (var i = 0; i < strs.Length; i++)
+ {
+ yield return strs[i].DeserializeDuo<T>();
+ }
+ }
+
+ /// <summary>
+ /// Deserializes an array of JSON strings to an enumerable of objects of type <typeparamref name="T"/> using the default JSON deserialization settings first, then camelCase property names with case-sensitive matching as a fallback.
+ /// Returns an enumerable of deserialized objects, or <see langword="null"/> if the deserialization fails for any of the input strings.
+ /// </summary>
+ /// <typeparam name="T">The type of the objects to be deserialized. Must be a reference type.</typeparam>
+ /// <param name="strs">The array of JSON strings to be deserialized.</param>
+ /// <returns>An enumerable of deserialized objects, or <see langword="null"/> if the deserialization fails for any of the input strings.</returns>
+ public static IEnumerable<T?> DeserializeDuo<T>(this IEnumerable<string> strs) where T : class
+ {
+ if (strs is null || !strs.Any())
+ {
+ yield break;
+ }
+ foreach (var str in strs)
+ {
+ yield return str.DeserializeDuo<T>();
+ }
+ }
+
+ /// <summary>
+ /// Deserializes an array of JSON strings to an enumerable of objects of type <typeparamref name="T"/> using the default JSON deserialization settings first, then camelCase property names with case-sensitive matching as a fallback.
+ /// Returns an enumerable of deserialized objects, or <see langword="null"/> if the deserialization fails for any of the input strings.
+ /// </summary>
+ /// <typeparam name="T">The type of the objects to be deserialized. Must be a reference type.</typeparam>
+ /// <param name="strs">The array of JSON strings to be deserialized.</param>
+ /// <returns>An enumerable of deserialized objects, or <see langword="null"/> if the deserialization fails for any of the input strings.</returns>
+ public static IEnumerable<T?> DeserializeDuo<T>(this IReadOnlyCollection<string> strs) where T : class
+ {
+ if (strs is null || strs.Count <= 0)
+ {
+ yield break;
+ }
+ foreach (var str in strs)
+ {
+ yield return str.DeserializeDuo<T>();
+ }
+ }
+
+ /// <summary>
+ /// Deserializes an array of JSON strings to an enumerable of objects of type <typeparamref name="T"/> using the default JSON deserialization settings first, then camelCase property names with case-sensitive matching as a fallback.
+ /// Returns an enumerable of deserialized objects, or <see langword="null"/> if the deserialization fails for any of the input strings.
+ /// </summary>
+ /// <typeparam name="T">The type of the objects to be deserialized. Must be a reference type.</typeparam>
+ /// <param name="strs">The array of JSON strings to be deserialized.</param>
+ /// <returns>An enumerable of deserialized objects, or <see langword="null"/> if the deserialization fails for any of the input strings.</returns>
+ public static IEnumerable<T?> DeserializeDuo<T>(this IReadOnlyList<string> strs) where T : class
+ {
+ if (strs is null || strs.Count <= 0)
+ {
+ yield break;
+ }
+ for (var i = 0; i < strs.Count; i++)
+ {
+ yield return strs[i].DeserializeDuo<T>();
+ }
+ }
+
+ /// <summary>
+ /// Deserializes an array of JSON strings to an enumerable of objects of type <typeparamref name="T"/> using the default JSON deserialization settings first, then camelCase property names with case-sensitive matching as a fallback.
+ /// Returns an enumerable of deserialized objects, or <see langword="null"/> if the deserialization fails for any of the input strings.
+ /// </summary>
+ /// <typeparam name="T">The type of the objects to be deserialized. Must be a reference type.</typeparam>
+ /// <param name="strs">The array of JSON strings to be deserialized.</param>
+ /// <returns>An enumerable of deserialized objects, or <see langword="null"/> if the deserialization fails for any of the input strings.</returns>
+ public static IEnumerable<T?> DeserializeDuo<T>(this IReadOnlySet<string> strs) where T : class
+ {
+ if (strs is null || strs.Count <= 0)
+ {
+ yield break;
+ }
+ foreach (var str in strs)
+ {
+ yield return str.DeserializeDuo<T>();
+ }
+ }
+
+ /// <summary>
+ /// Deserializes a JSON string to an object of type <typeparamref name="T"/> using camelCase property names with case-sensitive matching first, then the default JSON deserialization settings as a fallback.
+ /// If the deserialization fails, returns <see langword="null"/>.
+ /// </summary>
+ /// <typeparam name="T">The type of the object to be deserialized. Must be a reference type.</typeparam>
+ /// <param name="str">The JSON string to be deserialized.</param>
+ /// <returns>The deserialized object, or <see langword="null"/> if the deserialization fails.</returns>
+ public static T? DeserializeDuoCamelPriority<T>(this string str) where T : class
+ {
+ T? rslt;
+ try
+ {
+ rslt = JsonSerializer.Deserialize<T>(str, new JsonSerializerOptions
+ {
+ PropertyNameCaseInsensitive = false,
+ PropertyNamingPolicy = CamelCase
+ });
+ }
+ catch
+ {
+ rslt = default;
+ }
+ if (rslt is not null && rslt.AnyPropertiesNotDefault())
+ {
+ return rslt;
+ }
+ else
+ {
+ try
+ {
+ return JsonSerializer.Deserialize<T>(str);
+ }
+ catch
+ {
+ return default;
+ }
+ }
+ }
+
+ /// <summary>
+ /// Deserializes an array of JSON strings to an enumerable of objects of type <typeparamref name="T"/> using camelCase property names with case-sensitive matching first, then the default JSON deserialization settings as a fallback.
+ /// Returns an enumerable of deserialized objects, or <see langword="null"/> if the deserialization fails for any of the input strings.
+ /// </summary>
+ /// <typeparam name="T">The type of the objects to be deserialized. Must be a reference type.</typeparam>
+ /// <param name="strs">The array of JSON strings to be deserialized.</param>
+ /// <returns>An enumerable of deserialized objects, or <see langword="null"/> if the deserialization fails for any of the input strings.</returns>
+ public static IEnumerable<T?> DeserializeDuoCamelPriority<T>(params string[] strs) where T : class
+ {
+ if (strs is null || strs.Length <= 0)
+ {
+ yield break;
+ }
+ for (var i = 0; i < strs.Length; i++)
+ {
+ yield return strs[i].DeserializeDuoCamelPriority<T>();
+ }
+ }
+
+ /// <summary>
+ /// Deserializes an array of JSON strings to an enumerable of objects of type <typeparamref name="T"/> using camelCase property names with case-sensitive matching first, then the default JSON deserialization settings as a fallback.
+ /// Returns an enumerable of deserialized objects, or <see langword="null"/> if the deserialization fails for any of the input strings.
+ /// </summary>
+ /// <typeparam name="T">The type of the objects to be deserialized. Must be a reference type.</typeparam>
+ /// <param name="strs">The array of JSON strings to be deserialized.</param>
+ /// <returns>An enumerable of deserialized objects, or <see langword="null"/> if the deserialization fails for any of the input strings.</returns>
+ public static IEnumerable<T?> DeserializeDuoCamelPriority<T>(this IEnumerable<string> strs) where T : class
+ {
+ if (strs is null || !strs.Any())
+ {
+ yield break;
+ }
+ foreach (var str in strs)
+ {
+ yield return str.DeserializeDuoCamelPriority<T>();
+ }
+ }
+
+ /// <summary>
+ /// Deserializes an array of JSON strings to an enumerable of objects of type <typeparamref name="T"/> using camelCase property names with case-sensitive matching first, then the default JSON deserialization settings as a fallback.
+ /// Returns an enumerable of deserialized objects, or <see langword="null"/> if the deserialization fails for any of the input strings.
+ /// </summary>
+ /// <typeparam name="T">The type of the objects to be deserialized. Must be a reference type.</typeparam>
+ /// <param name="strs">The array of JSON strings to be deserialized.</param>
+ /// <returns>An enumerable of deserialized objects, or <see langword="null"/> if the deserialization fails for any of the input strings.</returns>
+ public static IEnumerable<T?> DeserializeDuoCamelPriority<T>(this IReadOnlyCollection<string> strs) where T : class
+ {
+ if (strs is null || strs.Count <= 0)
+ {
+ yield break;
+ }
+ foreach (var str in strs)
+ {
+ yield return str.DeserializeDuoCamelPriority<T>();
+ }
+ }
+
+ /// <summary>
+ /// Deserializes an array of JSON strings to an enumerable of objects of type <typeparamref name="T"/> using camelCase property names with case-sensitive matching first, then the default JSON deserialization settings as a fallback.
+ /// Returns an enumerable of deserialized objects, or <see langword="null"/> if the deserialization fails for any of the input strings.
+ /// </summary>
+ /// <typeparam name="T">The type of the objects to be deserialized. Must be a reference type.</typeparam>
+ /// <param name="strs">The array of JSON strings to be deserialized.</param>
+ /// <returns>An enumerable of deserialized objects, or <see langword="null"/> if the deserialization fails for any of the input strings.</returns>
+ public static IEnumerable<T?> DeserializeDuoCamelPriority<T>(this IReadOnlyList<string> strs) where T : class
+ {
+ if (strs is null || strs.Count <= 0)
+ {
+ yield break;
+ }
+ for (var i = 0; i < strs.Count; i++)
+ {
+ yield return strs[i].DeserializeDuoCamelPriority<T>();
+ }
+ }
+
+ /// <summary>
+ /// Deserializes an array of JSON strings to an enumerable of objects of type <typeparamref name="T"/> using camelCase property names with case-sensitive matching first, then the default JSON deserialization settings as a fallback.
+ /// Returns an enumerable of deserialized objects, or <see langword="null"/> if the deserialization fails for any of the input strings.
+ /// </summary>
+ /// <typeparam name="T">The type of the objects to be deserialized. Must be a reference type.</typeparam>
+ /// <param name="strs">The array of JSON strings to be deserialized.</param>
+ /// <returns>An enumerable of deserialized objects, or <see langword="null"/> if the deserialization fails for any of the input strings.</returns>
+ public static IEnumerable<T?> DeserializeDuoCamelPriority<T>(this IReadOnlySet<string> strs) where T : class
+ {
+ if (strs is null || strs.Count <= 0)
+ {
+ yield break;
+ }
+ foreach (var str in strs)
+ {
+ yield return str.DeserializeDuoCamelPriority<T>();
+ }
+ }
+
+ /// <summary>
+ /// Deserializes a JSON string to an object of type <typeparamref name="T"/> using the default JSON deserialization settings with camelCase property names and case-insensitive property matching as additional options.
+ /// If the deserialization fails, returns <see langword="null"/>.
+ /// </summary>
+ /// <typeparam name="T">The type of the object to be deserialized. Must be a reference type.</typeparam>
+ /// <param name="str">The JSON string to be deserialized.</param>
+ /// <returns>The deserialized object, or <see langword="null"/> if the deserialization fails.</returns>
+ public static T? DeserializeStandard<T>(this string str) where T : class
{
try
{
return JsonSerializer.Deserialize<T>(str, new JsonSerializerOptions
{
PropertyNameCaseInsensitive = true,
- PropertyNamingPolicy = CamelCase
+ PropertyNamingPolicy = CamelCase,
});
}
catch
@@ -483,7 +753,7 @@ public static IEnumerable<string> SerializeCamel<T>(this IReadOnlySet<T> mdls) where T : class
/// <typeparam name="T">The type of the objects to be deserialized. Must be a reference type.</typeparam>
/// <param name="strs">The array of JSON strings to be deserialized.</param>
/// <returns>An enumerable of deserialized objects, or <see langword="null"/> if the deserialization fails for any of the input strings.</returns>
- public static IEnumerable<T?> Deserialize<T>(params string[] strs) where T : class
+ public static IEnumerable<T?> DeserializeStandard<T>(params string[] strs) where T : class
{
if (strs is null || strs.Length <= 0)
{
@@ -491,7 +761,7 @@ public static IEnumerable<string> SerializeCamel<T>(this IReadOnlySet<T> mdls) where T : class
}
for (var i = 0; i < strs.Length; i++)
{
- yield return strs[i].Deserialize<T>();
+ yield return strs[i].DeserializeStandard<T>();
}
}
@@ -502,7 +772,7 @@ public static IEnumerable<string> SerializeCamel<T>(this IReadOnlySet<T> mdls) where T : class
/// <typeparam name="T">The type of the objects to be deserialized. Must be a reference type.</typeparam>
/// <param name="strs">The array of JSON strings to be deserialized.</param>
/// <returns>An enumerable of deserialized objects, or <see langword="null"/> if the deserialization fails for any of the input strings.</returns>
- public static IEnumerable<T?> Deserialize<T>(this IEnumerable<string> strs) where T : class
+ public static IEnumerable<T?> DeserializeStandard<T>(this IEnumerable<string> strs) where T : class
{
if (strs is null || !strs.Any())
{
@@ -510,7 +780,7 @@ public static IEnumerable<string> SerializeCamel<T>(this IReadOnlySet<T> mdls) where T : class
}
foreach (var str in strs)
{
- yield return str.Deserialize<T>();
+ yield return str.DeserializeStandard<T>();
}
}
@@ -521,7 +791,7 @@ public static IEnumerable<string> SerializeCamel<T>(this IReadOnlySet<T> mdls) where T : class
/// <typeparam name="T">The type of the objects to be deserialized. Must be a reference type.</typeparam>
/// <param name="strs">The array of JSON strings to be deserialized.</param>
/// <returns>An enumerable of deserialized objects, or <see langword="null"/> if the deserialization fails for any of the input strings.</returns>
- public static IEnumerable<T?> Deserialize<T>(this IReadOnlyCollection<string> strs) where T : class
+ public static IEnumerable<T?> DeserializeStandard<T>(this IReadOnlyCollection<string> strs) where T : class
{
if (strs is null || strs.Count <= 0)
{
@@ -529,7 +799,7 @@ public static IEnumerable<string> SerializeCamel<T>(this IReadOnlySet<T> mdls) where T : class
}
foreach (var str in strs)
{
- yield return str.Deserialize<T>();
+ yield return str.DeserializeStandard<T>();
}
}
@@ -540,7 +810,7 @@ public static IEnumerable<string> SerializeCamel<T>(this IReadOnlySet<T> mdls) where T : class
/// <typeparam name="T">The type of the objects to be deserialized. Must be a reference type.</typeparam>
/// <param name="strs">The array of JSON strings to be deserialized.</param>
/// <returns>An enumerable of deserialized objects, or <see langword="null"/> if the deserialization fails for any of the input strings.</returns>
- public static IEnumerable<T?> Deserialize<T>(this IReadOnlyList<string> strs) where T : class
+ public static IEnumerable<T?> DeserializeStandard<T>(this IReadOnlyList<string> strs) where T : class
{
if (strs is null || strs.Count <= 0)
{
@@ -548,7 +818,7 @@ public static IEnumerable<string> SerializeCamel<T>(this IReadOnlySet<T> mdls) where T : class
}
for (var i = 0; i < strs.Count; i++)
{
- yield return strs[i].Deserialize<T>();
+ yield return strs[i].DeserializeStandard<T>();
}
}
@@ -559,7 +829,7 @@ public static IEnumerable<string> SerializeCamel<T>(this IReadOnlySet<T> mdls) where T : class
/// <typeparam name="T">The type of the objects to be deserialized. Must be a reference type.</typeparam>
/// <param name="strs">The array of JSON strings to be deserialized.</param>
/// <returns>An enumerable of deserialized objects, or <see langword="null"/> if the deserialization fails for any of the input strings.</returns>
- public static IEnumerable<T?> Deserialize<T>(this IReadOnlySet<string> strs) where T : class
+ public static IEnumerable<T?> DeserializeStandard<T>(this IReadOnlySet<string> strs) where T : class
{
if (strs is null || strs.Count <= 0)
{
@@ -567,7 +837,7 @@ public static IEnumerable<string> SerializeCamel<T>(this IReadOnlySet<T> mdls) where T : class
}
foreach (var str in strs)
{
- yield return str.Deserialize<T>();
+ yield return str.DeserializeStandard<T>();
}
}
}
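
For reference, below is a minimal usage sketch of the API surface after this patch. The Person DTO, the Demo class, and the sample payloads are illustrative assumptions rather than part of the diff, and the extension methods are assumed to be importable from the YANLib namespace.

using System;
using System.Collections.Generic;
using YANLib;

// Hypothetical DTO, for illustration only.
public class Person
{
    public string? Name { get; set; }
    public int Age { get; set; }
}

public static class Demo
{
    public static void Main()
    {
        const string pascal = "{\"Name\":\"Yami\",\"Age\":23}"; // PascalCase payload
        const string camel = "{\"name\":\"yami\",\"age\":23}";  // camelCase payload

        var a = pascal.Deserialize<Person>();         // default (case-sensitive) settings: succeeds
        var b = camel.Deserialize<Person>();          // names miss under default settings: properties stay default
        var c = camel.DeserializeDuo<Person>();       // default attempt yields defaults, camelCase retry succeeds
        var d = camel.DeserializeStandard<Person>();  // case-insensitive options: succeeds

        IReadOnlyList<Person> people = new List<Person> { c! };
        IEnumerable<string> jsons = people.Serialize(); // renamed from SerializePascal

        Console.WriteLine(string.Join(Environment.NewLine, jsons));
    }
}

The fallback in DeserializeDuo relies on AnyPropertiesNotDefault(): a case-sensitive miss still produces a non-null object whose properties are all defaults, so that check, not a null test, is what triggers the second attempt.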
diff --git a/lib/YANLib/YANLib.csproj b/lib/YANLib/YANLib.csproj
index 519bbeb1..6735b8a6 100644
--- a/lib/YANLib/YANLib.csproj
+++ b/lib/YANLib/YANLib.csproj
@@ -9,9 +9,7 @@
<Title>YANLib</Title>
<Authors>Yami An</Authors>
<Company>YAN</Company>
- <PackageReleaseNotes>Update:
-- YANModel
-- YANJson</PackageReleaseNotes>
+ <PackageReleaseNotes>Update: YANJson</PackageReleaseNotes>
<Copyright>Copyright © 2023</Copyright>
<PackageIcon>icon.png</PackageIcon>
<PackageProjectUrl>https://github.com/Tynab</PackageProjectUrl>
@@ -24,7 +22,7 @@
<PackageLicenseExpression>MIT</PackageLicenseExpression>
<PackageRequireLicenseAcceptance>False</PackageRequireLicenseAcceptance>
<PackageId>Tynab.YANLib</PackageId>
- <Version>2.2.0</Version>
+ <Version>2.2.1</Version>
diff --git a/lib/YANLib/nuget.config b/lib/YANLib/nuget.config
index 953d08e9..a393e786 100644
--- a/lib/YANLib/nuget.config
+++ b/lib/YANLib/nuget.config
@@ -7,7 +7,7 @@