diff --git a/docs/docset.yml b/docs/docset.yml
new file mode 100644
index 00000000000..2b82d085fc1
--- /dev/null
+++ b/docs/docset.yml
@@ -0,0 +1,508 @@
+project: 'Integration developer guide'
+exclude:
+ - ci_pipelines.md
+ - dashboard_guidelines.md
+ - definitions.md
+ - developer_tsdb_migration_guidelines.md
+ - developer_workflow_bug_fix_older_package_version.md
+ - developer_workflow_design_build_test_integration.md
+ - developer_workflow_fleet_ui.md
+ - documentation_guidelines.md
+ - ecs@mappings_migration_guide.md
+ - fine_tune_integration.md
+ - generic_guidelines.md
+ - how_to_test_new_indexing_features.md
+ - import_from_beats.md
+ - subobjects_adoption_guide.md
+ - testing_and_validation.md
+ - tips_for_building_integrations.md
+cross_links:
+ - docs-content
+ - ecs
+ - elasticsearch
+toc:
+ - toc: extend
+subs:
+ ref: "https://www.elastic.co/guide/en/elasticsearch/reference/current"
+ ref-bare: "https://www.elastic.co/guide/en/elasticsearch/reference"
+ ref-8x: "https://www.elastic.co/guide/en/elasticsearch/reference/8.1"
+ ref-80: "https://www.elastic.co/guide/en/elasticsearch/reference/8.0"
+ ref-7x: "https://www.elastic.co/guide/en/elasticsearch/reference/7.17"
+ ref-70: "https://www.elastic.co/guide/en/elasticsearch/reference/7.0"
+ ref-60: "https://www.elastic.co/guide/en/elasticsearch/reference/6.0"
+ ref-64: "https://www.elastic.co/guide/en/elasticsearch/reference/6.4"
+ xpack-ref: "https://www.elastic.co/guide/en/x-pack/6.2"
+ logstash-ref: "https://www.elastic.co/guide/en/logstash/current"
+ kibana-ref: "https://www.elastic.co/guide/en/kibana/current"
+ kibana-ref-all: "https://www.elastic.co/guide/en/kibana"
+ beats-ref-root: "https://www.elastic.co/guide/en/beats"
+ beats-ref: "https://www.elastic.co/guide/en/beats/libbeat/current"
+ beats-ref-60: "https://www.elastic.co/guide/en/beats/libbeat/6.0"
+ beats-ref-63: "https://www.elastic.co/guide/en/beats/libbeat/6.3"
+ beats-devguide: "https://www.elastic.co/guide/en/beats/devguide/current"
+ auditbeat-ref: "https://www.elastic.co/guide/en/beats/auditbeat/current"
+ packetbeat-ref: "https://www.elastic.co/guide/en/beats/packetbeat/current"
+ metricbeat-ref: "https://www.elastic.co/guide/en/beats/metricbeat/current"
+ filebeat-ref: "https://www.elastic.co/guide/en/beats/filebeat/current"
+ functionbeat-ref: "https://www.elastic.co/guide/en/beats/functionbeat/current"
+ winlogbeat-ref: "https://www.elastic.co/guide/en/beats/winlogbeat/current"
+ heartbeat-ref: "https://www.elastic.co/guide/en/beats/heartbeat/current"
+ journalbeat-ref: "https://www.elastic.co/guide/en/beats/journalbeat/current"
+ ingest-guide: "https://www.elastic.co/guide/en/ingest/current"
+ fleet-guide: "https://www.elastic.co/guide/en/fleet/current"
+ apm-guide-ref: "https://www.elastic.co/guide/en/apm/guide/current"
+ apm-guide-7x: "https://www.elastic.co/guide/en/apm/guide/7.17"
+ apm-app-ref: "https://www.elastic.co/guide/en/kibana/current"
+ apm-agents-ref: "https://www.elastic.co/guide/en/apm/agent"
+ apm-android-ref: "https://www.elastic.co/guide/en/apm/agent/android/current"
+ apm-py-ref: "https://www.elastic.co/guide/en/apm/agent/python/current"
+ apm-py-ref-3x: "https://www.elastic.co/guide/en/apm/agent/python/3.x"
+ apm-node-ref-index: "https://www.elastic.co/guide/en/apm/agent/nodejs"
+ apm-node-ref: "https://www.elastic.co/guide/en/apm/agent/nodejs/current"
+ apm-node-ref-1x: "https://www.elastic.co/guide/en/apm/agent/nodejs/1.x"
+ apm-rum-ref: "https://www.elastic.co/guide/en/apm/agent/rum-js/current"
+ apm-ruby-ref: "https://www.elastic.co/guide/en/apm/agent/ruby/current"
+ apm-java-ref: "https://www.elastic.co/guide/en/apm/agent/java/current"
+ apm-go-ref: "https://www.elastic.co/guide/en/apm/agent/go/current"
+ apm-dotnet-ref: "https://www.elastic.co/guide/en/apm/agent/dotnet/current"
+ apm-php-ref: "https://www.elastic.co/guide/en/apm/agent/php/current"
+ apm-ios-ref: "https://www.elastic.co/guide/en/apm/agent/swift/current"
+ apm-lambda-ref: "https://www.elastic.co/guide/en/apm/lambda/current"
+ apm-attacher-ref: "https://www.elastic.co/guide/en/apm/attacher/current"
+ docker-logging-ref: "https://www.elastic.co/guide/en/beats/loggingplugin/current"
+ esf-ref: "https://www.elastic.co/guide/en/esf/current"
+ kinesis-firehose-ref: "https://www.elastic.co/guide/en/kinesis/{{kinesis_version}}"
+ estc-welcome-current: "https://www.elastic.co/guide/en/starting-with-the-elasticsearch-platform-and-its-solutions/current"
+ estc-welcome: "https://www.elastic.co/guide/en/starting-with-the-elasticsearch-platform-and-its-solutions/current"
+ estc-welcome-all: "https://www.elastic.co/guide/en/starting-with-the-elasticsearch-platform-and-its-solutions"
+ hadoop-ref: "https://www.elastic.co/guide/en/elasticsearch/hadoop/current"
+ stack-ref: "https://www.elastic.co/guide/en/elastic-stack/current"
+ stack-ref-67: "https://www.elastic.co/guide/en/elastic-stack/6.7"
+ stack-ref-68: "https://www.elastic.co/guide/en/elastic-stack/6.8"
+ stack-ref-70: "https://www.elastic.co/guide/en/elastic-stack/7.0"
+ stack-ref-80: "https://www.elastic.co/guide/en/elastic-stack/8.0"
+ stack-ov: "https://www.elastic.co/guide/en/elastic-stack-overview/current"
+ stack-gs: "https://www.elastic.co/guide/en/elastic-stack-get-started/current"
+ stack-gs-current: "https://www.elastic.co/guide/en/elastic-stack-get-started/current"
+ javaclient: "https://www.elastic.co/guide/en/elasticsearch/client/java-api/current"
+ java-api-client: "https://www.elastic.co/guide/en/elasticsearch/client/java-api-client/current"
+ java-rest: "https://www.elastic.co/guide/en/elasticsearch/client/java-rest/current"
+ jsclient: "https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current"
+ jsclient-current: "https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current"
+ es-ruby-client: "https://www.elastic.co/guide/en/elasticsearch/client/ruby-api/current"
+ es-dotnet-client: "https://www.elastic.co/guide/en/elasticsearch/client/net-api/current"
+ es-php-client: "https://www.elastic.co/guide/en/elasticsearch/client/php-api/current"
+ es-python-client: "https://www.elastic.co/guide/en/elasticsearch/client/python-api/current"
+ defguide: "https://www.elastic.co/guide/en/elasticsearch/guide/2.x"
+ painless: "https://www.elastic.co/guide/en/elasticsearch/painless/current"
+ plugins: "https://www.elastic.co/guide/en/elasticsearch/plugins/current"
+ plugins-8x: "https://www.elastic.co/guide/en/elasticsearch/plugins/8.1"
+ plugins-7x: "https://www.elastic.co/guide/en/elasticsearch/plugins/7.17"
+ plugins-6x: "https://www.elastic.co/guide/en/elasticsearch/plugins/6.8"
+ glossary: "https://www.elastic.co/guide/en/elastic-stack-glossary/current"
+ upgrade_guide: "https://www.elastic.co/products/upgrade_guide"
+ blog-ref: "https://www.elastic.co/blog/"
+ curator-ref: "https://www.elastic.co/guide/en/elasticsearch/client/curator/current"
+ curator-ref-current: "https://www.elastic.co/guide/en/elasticsearch/client/curator/current"
+ metrics-ref: "https://www.elastic.co/guide/en/metrics/current"
+ metrics-guide: "https://www.elastic.co/guide/en/metrics/guide/current"
+ logs-ref: "https://www.elastic.co/guide/en/logs/current"
+ logs-guide: "https://www.elastic.co/guide/en/logs/guide/current"
+ uptime-guide: "https://www.elastic.co/guide/en/uptime/current"
+ observability-guide: "https://www.elastic.co/guide/en/observability/current"
+ observability-guide-all: "https://www.elastic.co/guide/en/observability"
+ siem-guide: "https://www.elastic.co/guide/en/siem/guide/current"
+ security-guide: "https://www.elastic.co/guide/en/security/current"
+ security-guide-all: "https://www.elastic.co/guide/en/security"
+ endpoint-guide: "https://www.elastic.co/guide/en/endpoint/current"
+ sql-odbc: "https://www.elastic.co/guide/en/elasticsearch/sql-odbc/current"
+ ecs-ref: "https://www.elastic.co/guide/en/ecs/current"
+ ecs-logging-ref: "https://www.elastic.co/guide/en/ecs-logging/overview/current"
+ ecs-logging-go-logrus-ref: "https://www.elastic.co/guide/en/ecs-logging/go-logrus/current"
+ ecs-logging-go-zap-ref: "https://www.elastic.co/guide/en/ecs-logging/go-zap/current"
+ ecs-logging-go-zerolog-ref: "https://www.elastic.co/guide/en/ecs-logging/go-zerolog/current"
+ ecs-logging-java-ref: "https://www.elastic.co/guide/en/ecs-logging/java/current"
+ ecs-logging-dotnet-ref: "https://www.elastic.co/guide/en/ecs-logging/dotnet/current"
+ ecs-logging-nodejs-ref: "https://www.elastic.co/guide/en/ecs-logging/nodejs/current"
+ ecs-logging-php-ref: "https://www.elastic.co/guide/en/ecs-logging/php/current"
+ ecs-logging-python-ref: "https://www.elastic.co/guide/en/ecs-logging/python/current"
+ ecs-logging-ruby-ref: "https://www.elastic.co/guide/en/ecs-logging/ruby/current"
+ ml-docs: "https://www.elastic.co/guide/en/machine-learning/current"
+ eland-docs: "https://www.elastic.co/guide/en/elasticsearch/client/eland/current"
+ eql-ref: "https://eql.readthedocs.io/en/latest/query-guide"
+ extendtrial: "https://www.elastic.co/trialextension"
+ wikipedia: "https://en.wikipedia.org/wiki"
+ forum: "https://discuss.elastic.co/"
+ xpack-forum: "https://discuss.elastic.co/c/50-x-pack"
+ security-forum: "https://discuss.elastic.co/c/x-pack/shield"
+ watcher-forum: "https://discuss.elastic.co/c/x-pack/watcher"
+ monitoring-forum: "https://discuss.elastic.co/c/x-pack/marvel"
+ graph-forum: "https://discuss.elastic.co/c/x-pack/graph"
+ apm-forum: "https://discuss.elastic.co/c/apm"
+ enterprise-search-ref: "https://www.elastic.co/guide/en/enterprise-search/current"
+ app-search-ref: "https://www.elastic.co/guide/en/app-search/current"
+ workplace-search-ref: "https://www.elastic.co/guide/en/workplace-search/current"
+ enterprise-search-node-ref: "https://www.elastic.co/guide/en/enterprise-search-clients/enterprise-search-node/current"
+ enterprise-search-php-ref: "https://www.elastic.co/guide/en/enterprise-search-clients/php/current"
+ enterprise-search-python-ref: "https://www.elastic.co/guide/en/enterprise-search-clients/python/current"
+ enterprise-search-ruby-ref: "https://www.elastic.co/guide/en/enterprise-search-clients/ruby/current"
+ elastic-maps-service: "https://maps.elastic.co"
+ integrations-docs: "https://docs.elastic.co/en/integrations"
+ integrations-devguide: "https://www.elastic.co/guide/en/integrations-developer/current"
+ time-units: "https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#time-units"
+ byte-units: "https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#byte-units"
+ apm-py-ref-v: "https://www.elastic.co/guide/en/apm/agent/python/current"
+ apm-node-ref-v: "https://www.elastic.co/guide/en/apm/agent/nodejs/current"
+ apm-rum-ref-v: "https://www.elastic.co/guide/en/apm/agent/rum-js/current"
+ apm-ruby-ref-v: "https://www.elastic.co/guide/en/apm/agent/ruby/current"
+ apm-java-ref-v: "https://www.elastic.co/guide/en/apm/agent/java/current"
+ apm-go-ref-v: "https://www.elastic.co/guide/en/apm/agent/go/current"
+ apm-ios-ref-v: "https://www.elastic.co/guide/en/apm/agent/swift/current"
+ apm-dotnet-ref-v: "https://www.elastic.co/guide/en/apm/agent/dotnet/current"
+ apm-php-ref-v: "https://www.elastic.co/guide/en/apm/agent/php/current"
+ ecloud: "Elastic Cloud"
+ esf: "Elastic Serverless Forwarder"
+ ess: "Elasticsearch Service"
+ ece: "Elastic Cloud Enterprise"
+ eck: "Elastic Cloud on Kubernetes"
+ serverless-full: "Elastic Cloud Serverless"
+ serverless-short: "Serverless"
+ es-serverless: "Elasticsearch Serverless"
+ es3: "Elasticsearch Serverless"
+ obs-serverless: "Elastic Observability Serverless"
+ sec-serverless: "Elastic Security Serverless"
+ serverless-docs: "https://docs.elastic.co/serverless"
+ cloud: "https://www.elastic.co/guide/en/cloud/current"
+ ess-utm-params: "?page=docs&placement=docs-body"
+ ess-baymax: "?page=docs&placement=docs-body"
+ ess-trial: "https://cloud.elastic.co/registration?page=docs&placement=docs-body"
+ ess-product: "https://www.elastic.co/cloud/elasticsearch-service?page=docs&placement=docs-body"
+ ess-console: "https://cloud.elastic.co?page=docs&placement=docs-body"
+ ess-console-name: "Elasticsearch Service Console"
+ ess-deployments: "https://cloud.elastic.co/deployments?page=docs&placement=docs-body"
+ ece-ref: "https://www.elastic.co/guide/en/cloud-enterprise/current"
+ eck-ref: "https://www.elastic.co/guide/en/cloud-on-k8s/current"
+ ess-leadin: "You can run Elasticsearch on your own hardware or use our hosted Elasticsearch Service that is available on AWS, GCP, and Azure. https://cloud.elastic.co/registration{ess-utm-params}[Try the Elasticsearch Service for free]."
+ ess-leadin-short: "Our hosted Elasticsearch Service is available on AWS, GCP, and Azure, and you can https://cloud.elastic.co/registration{ess-utm-params}[try it for free]."
+ ess-icon: "image:https://doc-icons.s3.us-east-2.amazonaws.com/logo_cloud.svg[link=\"https://cloud.elastic.co/registration{ess-utm-params}\", title=\"Supported on Elasticsearch Service\"]"
+ ece-icon: "image:https://doc-icons.s3.us-east-2.amazonaws.com/logo_cloud_ece.svg[link=\"https://cloud.elastic.co/registration{ess-utm-params}\", title=\"Supported on Elastic Cloud Enterprise\"]"
+ cloud-only: "This feature is designed for indirect use by https://cloud.elastic.co/registration{ess-utm-params}[Elasticsearch Service], https://www.elastic.co/guide/en/cloud-enterprise/{ece-version-link}[Elastic Cloud Enterprise], and https://www.elastic.co/guide/en/cloud-on-k8s/current[Elastic Cloud on Kubernetes]. Direct use is not supported."
+ ess-setting-change: "image:https://doc-icons.s3.us-east-2.amazonaws.com/logo_cloud.svg[link=\"{ess-trial}\", title=\"Supported on {ess}\"] indicates a change to a supported https://www.elastic.co/guide/en/cloud/current/ec-add-user-settings.html[user setting] for Elasticsearch Service."
+ ess-skip-section: "If you use Elasticsearch Service, skip this section. Elasticsearch Service handles these changes for you."
+ api-cloud: "https://www.elastic.co/api/doc/cloud"
+ api-ece: "https://www.elastic.co/api/doc/cloud-enterprise"
+ api-kibana-serverless: "https://www.elastic.co/api/doc/serverless"
+ es-feature-flag: "This feature is in development and not yet available for use. This documentation is provided for informational purposes only."
+ es-ref-dir: "'{{elasticsearch-root}}/reference'"
+ apm-app: "APM app"
+ uptime-app: "Uptime app"
+ synthetics-app: "Synthetics app"
+ logs-app: "Logs app"
+ metrics-app: "Metrics app"
+ infrastructure-app: "Infrastructure app"
+ siem-app: "SIEM app"
+ security-app: "Elastic Security app"
+ ml-app: "Machine Learning"
+ dev-tools-app: "Dev Tools"
+ ingest-manager-app: "Ingest Manager"
+ stack-manage-app: "Stack Management"
+ stack-monitor-app: "Stack Monitoring"
+ alerts-ui: "Alerts and Actions"
+ rules-ui: "Rules"
+ rac-ui: "Rules and Connectors"
+ connectors-ui: "Connectors"
+ connectors-feature: "Actions and Connectors"
+ stack-rules-feature: "Stack Rules"
+ user-experience: "User Experience"
+ ems: "Elastic Maps Service"
+ ems-init: "EMS"
+ hosted-ems: "Elastic Maps Server"
+ ipm-app: "Index Pattern Management"
+ ingest-pipelines: "ingest pipelines"
+ ingest-pipelines-app: "Ingest Pipelines"
+ ingest-pipelines-cap: "Ingest pipelines"
+ ls-pipelines: "Logstash pipelines"
+ ls-pipelines-app: "Logstash Pipelines"
+ maint-windows: "maintenance windows"
+ maint-windows-app: "Maintenance Windows"
+ maint-windows-cap: "Maintenance windows"
+ custom-roles-app: "Custom Roles"
+ data-source: "data view"
+ data-sources: "data views"
+ data-source-caps: "Data View"
+ data-sources-caps: "Data Views"
+ data-source-cap: "Data view"
+ data-sources-cap: "Data views"
+ project-settings: "Project settings"
+ manage-app: "Management"
+ index-manage-app: "Index Management"
+ data-views-app: "Data Views"
+ rules-app: "Rules"
+ saved-objects-app: "Saved Objects"
+ tags-app: "Tags"
+ api-keys-app: "API keys"
+ transforms-app: "Transforms"
+ connectors-app: "Connectors"
+ files-app: "Files"
+ reports-app: "Reports"
+ maps-app: "Maps"
+ alerts-app: "Alerts"
+ crawler: "Enterprise Search web crawler"
+ ents: "Enterprise Search"
+ app-search-crawler: "App Search web crawler"
+ agent: "Elastic Agent"
+ agents: "Elastic Agents"
+ fleet: "Fleet"
+ fleet-server: "Fleet Server"
+ integrations-server: "Integrations Server"
+ ingest-manager: "Ingest Manager"
+ ingest-management: "ingest management"
+ package-manager: "Elastic Package Manager"
+ integrations: "Integrations"
+ package-registry: "Elastic Package Registry"
+ artifact-registry: "Elastic Artifact Registry"
+ aws: "AWS"
+ stack: "Elastic Stack"
+ xpack: "X-Pack"
+ es: "Elasticsearch"
+ kib: "Kibana"
+ esms: "Elastic Stack Monitoring Service"
+ esms-init: "ESMS"
+ ls: "Logstash"
+ beats: "Beats"
+ auditbeat: "Auditbeat"
+ filebeat: "Filebeat"
+ heartbeat: "Heartbeat"
+ metricbeat: "Metricbeat"
+ packetbeat: "Packetbeat"
+ winlogbeat: "Winlogbeat"
+ functionbeat: "Functionbeat"
+ journalbeat: "Journalbeat"
+ es-sql: "Elasticsearch SQL"
+ esql: "ES|QL"
+ elastic-agent: "Elastic Agent"
+ k8s: "Kubernetes"
+ log-driver-long: "Elastic Logging Plugin for Docker"
+ security: "X-Pack security"
+ security-features: "security features"
+ operator-feature: "operator privileges feature"
+ es-security-features: "Elasticsearch security features"
+ stack-security-features: "Elastic Stack security features"
+ endpoint-sec: "Endpoint Security"
+ endpoint-cloud-sec: "Endpoint and Cloud Security"
+ elastic-defend: "Elastic Defend"
+ elastic-sec: "Elastic Security"
+ elastic-endpoint: "Elastic Endpoint"
+ swimlane: "Swimlane"
+ sn: "ServiceNow"
+ sn-itsm: "ServiceNow ITSM"
+ sn-itom: "ServiceNow ITOM"
+ sn-sir: "ServiceNow SecOps"
+ jira: "Jira"
+ ibm-r: "IBM Resilient"
+ webhook: "Webhook"
+ webhook-cm: "Webhook - Case Management"
+ opsgenie: "Opsgenie"
+ bedrock: "Amazon Bedrock"
+ gemini: "Google Gemini"
+ hive: "TheHive"
+ monitoring: "X-Pack monitoring"
+ monitor-features: "monitoring features"
+ stack-monitor-features: "Elastic Stack monitoring features"
+ watcher: "Watcher"
+ alert-features: "alerting features"
+ reporting: "X-Pack reporting"
+ report-features: "reporting features"
+ graph: "X-Pack graph"
+ graph-features: "graph analytics features"
+ searchprofiler: "Search Profiler"
+ xpackml: "X-Pack machine learning"
+ ml: "machine learning"
+ ml-cap: "Machine learning"
+ ml-init: "ML"
+ ml-features: "machine learning features"
+ stack-ml-features: "Elastic Stack machine learning features"
+ ccr: "cross-cluster replication"
+ ccr-cap: "Cross-cluster replication"
+ ccr-init: "CCR"
+ ccs: "cross-cluster search"
+ ccs-cap: "Cross-cluster search"
+ ccs-init: "CCS"
+ ilm: "index lifecycle management"
+ ilm-cap: "Index lifecycle management"
+ ilm-init: "ILM"
+ dlm: "data lifecycle management"
+ dlm-cap: "Data lifecycle management"
+ dlm-init: "DLM"
+ search-snap: "searchable snapshot"
+ search-snaps: "searchable snapshots"
+ search-snaps-cap: "Searchable snapshots"
+ slm: "snapshot lifecycle management"
+ slm-cap: "Snapshot lifecycle management"
+ slm-init: "SLM"
+ rollup-features: "data rollup features"
+ ipm: "index pattern management"
+ ipm-cap: "Index pattern"
+ rollup: "rollup"
+ rollup-cap: "Rollup"
+ rollups: "rollups"
+ rollups-cap: "Rollups"
+ rollup-job: "rollup job"
+ rollup-jobs: "rollup jobs"
+ rollup-jobs-cap: "Rollup jobs"
+ dfeed: "datafeed"
+ dfeeds: "datafeeds"
+ dfeed-cap: "Datafeed"
+ dfeeds-cap: "Datafeeds"
+ ml-jobs: "machine learning jobs"
+ ml-jobs-cap: "Machine learning jobs"
+ anomaly-detect: "anomaly detection"
+ anomaly-detect-cap: "Anomaly detection"
+ anomaly-job: "anomaly detection job"
+ anomaly-jobs: "anomaly detection jobs"
+ anomaly-jobs-cap: "Anomaly detection jobs"
+ dataframe: "data frame"
+ dataframes: "data frames"
+ dataframe-cap: "Data frame"
+ dataframes-cap: "Data frames"
+ watcher-transform: "payload transform"
+ watcher-transforms: "payload transforms"
+ watcher-transform-cap: "Payload transform"
+ watcher-transforms-cap: "Payload transforms"
+ transform: "transform"
+ transforms: "transforms"
+ transform-cap: "Transform"
+ transforms-cap: "Transforms"
+ dataframe-transform: "transform"
+ dataframe-transform-cap: "Transform"
+ dataframe-transforms: "transforms"
+ dataframe-transforms-cap: "Transforms"
+ dfanalytics-cap: "Data frame analytics"
+ dfanalytics: "data frame analytics"
+ dataframe-analytics-config: "'{dataframe} analytics config'"
+ dfanalytics-job: "'{dataframe} analytics job'"
+ dfanalytics-jobs: "'{dataframe} analytics jobs'"
+ dfanalytics-jobs-cap: "'{dataframe-cap} analytics jobs'"
+ cdataframe: "continuous data frame"
+ cdataframes: "continuous data frames"
+ cdataframe-cap: "Continuous data frame"
+ cdataframes-cap: "Continuous data frames"
+ cdataframe-transform: "continuous transform"
+ cdataframe-transforms: "continuous transforms"
+ cdataframe-transforms-cap: "Continuous transforms"
+ ctransform: "continuous transform"
+ ctransform-cap: "Continuous transform"
+ ctransforms: "continuous transforms"
+ ctransforms-cap: "Continuous transforms"
+ oldetection: "outlier detection"
+ oldetection-cap: "Outlier detection"
+ olscore: "outlier score"
+ olscores: "outlier scores"
+ fiscore: "feature influence score"
+ evaluatedf-api: "evaluate {dataframe} analytics API"
+ evaluatedf-api-cap: "Evaluate {dataframe} analytics API"
+ binarysc: "binary soft classification"
+ binarysc-cap: "Binary soft classification"
+ regression: "regression"
+ regression-cap: "Regression"
+ reganalysis: "regression analysis"
+ reganalysis-cap: "Regression analysis"
+ depvar: "dependent variable"
+ feature-var: "feature variable"
+ feature-vars: "feature variables"
+ feature-vars-cap: "Feature variables"
+ classification: "classification"
+ classification-cap: "Classification"
+ classanalysis: "classification analysis"
+ classanalysis-cap: "Classification analysis"
+ infer-cap: "Inference"
+ infer: "inference"
+ lang-ident-cap: "Language identification"
+ lang-ident: "language identification"
+ data-viz: "Data Visualizer"
+ file-data-viz: "File Data Visualizer"
+ feat-imp: "feature importance"
+ feat-imp-cap: "Feature importance"
+ nlp: "natural language processing"
+ nlp-cap: "Natural language processing"
+ apm-agent: "APM agent"
+ apm-go-agent: "Elastic APM Go agent"
+ apm-go-agents: "Elastic APM Go agents"
+ apm-ios-agent: "Elastic APM iOS agent"
+ apm-ios-agents: "Elastic APM iOS agents"
+ apm-java-agent: "Elastic APM Java agent"
+ apm-java-agents: "Elastic APM Java agents"
+ apm-dotnet-agent: "Elastic APM .NET agent"
+ apm-dotnet-agents: "Elastic APM .NET agents"
+ apm-node-agent: "Elastic APM Node.js agent"
+ apm-node-agents: "Elastic APM Node.js agents"
+ apm-php-agent: "Elastic APM PHP agent"
+ apm-php-agents: "Elastic APM PHP agents"
+ apm-py-agent: "Elastic APM Python agent"
+ apm-py-agents: "Elastic APM Python agents"
+ apm-ruby-agent: "Elastic APM Ruby agent"
+ apm-ruby-agents: "Elastic APM Ruby agents"
+ apm-rum-agent: "Elastic APM Real User Monitoring (RUM) JavaScript agent"
+ apm-rum-agents: "Elastic APM RUM JavaScript agents"
+ apm-lambda-ext: "Elastic APM AWS Lambda extension"
+ project-monitors: "project monitors"
+ project-monitors-cap: "Project monitors"
+ private-location: "Private Location"
+ private-locations: "Private Locations"
+ pwd: "YOUR_PASSWORD"
+ esh: "ES-Hadoop"
+ default-dist: "default distribution"
+ oss-dist: "OSS-only distribution"
+ observability: "Observability"
+ api-request-title: "Request"
+ api-prereq-title: "Prerequisites"
+ api-description-title: "Description"
+ api-path-parms-title: "Path parameters"
+ api-query-parms-title: "Query parameters"
+ api-request-body-title: "Request body"
+ api-response-codes-title: "Response codes"
+ api-response-body-title: "Response body"
+ api-example-title: "Example"
+ api-examples-title: "Examples"
+ api-definitions-title: "Properties"
+ multi-arg: "†footnoteref:[multi-arg,This parameter accepts multiple arguments.]"
+ multi-arg-ref: "†footnoteref:[multi-arg]"
+ yes-icon: "image:https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png[Yes,20,15]"
+ no-icon: "image:https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png[No,20,15]"
+ es-repo: "https://github.com/elastic/elasticsearch/"
+ es-issue: "https://github.com/elastic/elasticsearch/issues/"
+ es-pull: "https://github.com/elastic/elasticsearch/pull/"
+ es-commit: "https://github.com/elastic/elasticsearch/commit/"
+ kib-repo: "https://github.com/elastic/kibana/"
+ kib-issue: "https://github.com/elastic/kibana/issues/"
+ kibana-issue: "'{kib-repo}issues/'"
+ kib-pull: "https://github.com/elastic/kibana/pull/"
+ kibana-pull: "'{kib-repo}pull/'"
+ kib-commit: "https://github.com/elastic/kibana/commit/"
+ ml-repo: "https://github.com/elastic/ml-cpp/"
+ ml-issue: "https://github.com/elastic/ml-cpp/issues/"
+ ml-pull: "https://github.com/elastic/ml-cpp/pull/"
+ ml-commit: "https://github.com/elastic/ml-cpp/commit/"
+ apm-repo: "https://github.com/elastic/apm-server/"
+ apm-issue: "https://github.com/elastic/apm-server/issues/"
+ apm-pull: "https://github.com/elastic/apm-server/pull/"
+ kibana-blob: "https://github.com/elastic/kibana/blob/current/"
+ apm-get-started-ref: "https://www.elastic.co/guide/en/apm/get-started/current"
+ apm-server-ref: "https://www.elastic.co/guide/en/apm/server/current"
+ apm-server-ref-v: "https://www.elastic.co/guide/en/apm/server/current"
+ apm-server-ref-m: "https://www.elastic.co/guide/en/apm/server/master"
+ apm-server-ref-62: "https://www.elastic.co/guide/en/apm/server/6.2"
+ apm-server-ref-64: "https://www.elastic.co/guide/en/apm/server/6.4"
+ apm-server-ref-70: "https://www.elastic.co/guide/en/apm/server/7.0"
+ apm-overview-ref-v: "https://www.elastic.co/guide/en/apm/get-started/current"
+ apm-overview-ref-70: "https://www.elastic.co/guide/en/apm/get-started/7.0"
+ apm-overview-ref-m: "https://www.elastic.co/guide/en/apm/get-started/master"
+ infra-guide: "https://www.elastic.co/guide/en/infrastructure/guide/current"
+ a-data-source: "a data view"
+ icon-bug: "pass:[]"
+ icon-checkInCircleFilled: "pass:[]"
+ icon-warningFilled: "pass:[]"
diff --git a/docs/extend/_publish_an_integration.md b/docs/extend/_publish_an_integration.md
new file mode 100644
index 00000000000..c247b678867
--- /dev/null
+++ b/docs/extend/_publish_an_integration.md
@@ -0,0 +1,37 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/integrations-developer/current/_publish_an_integration.html
+---
+
+# Publish an integration [_publish_an_integration]
+
+When your integration is done, it’s time to open a PR to include it in the integrations repository. Before opening your PR, run:
+
+```bash
+elastic-package check
+```
+
+The `check` command ensures the package is built correctly, formatted properly, and aligned with the spec. Passing the `check` command is required before adding your integration to the repository.
+
+When CI is happy, merge your PR into the integrations repository.
+
+CI will kick off a build job for the main branch, which can release your integration to the package-storage. That job opens a PR to the Package Storage/snapshot with the built integration, but only if the package version doesn't already exist in the storage (that is, if it hasn't been released yet).
+
+
+## Promote [_promote]
+
+Now that you’ve tested your integration with {{kib}}, it’s time to promote it to staging or production. Run:
+
+```bash
+elastic-package promote
+```
+
+The tool will open two pull requests (promote and delete) against the package-storage: one for the target branch and one for the source branch.
+
+Review both pull requests on your own, check that CI is happy, and merge them in order: first the target, then the source. Once a PR is merged, CI will kick off a job to bake a new Docker image of package-storage. Ideally, the "delete" PR should be merged only once the CI job for "promote" is done, as the Docker image of the previous stage depends on the latter one.
+
+::::{tip}
+When you are ready for your changes in the integration to be released, remember to bump up the package version. It is up to you, as the package developer, to decide how many changes you want to release in a single version. For example, you could implement a change in a PR and bump up the package version in the same PR. Or you could implement several changes across multiple pull requests and then bump up the package version in the last of these pull requests or in a separate follow up PR.
+::::
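+
+As a sketch, suppose the next release is version `1.2.1` (an illustrative number): you would set `version: "1.2.1"` in the package's `manifest.yml` and add a matching entry to `changelog.yml`:
+
+```yaml
+# changelog.yml: entry for the new package version (illustrative values)
+- version: "1.2.1"
+  changes:
+    - description: Fix broken template
+      type: bugfix
+      link: https://github.com/elastic/integrations/pull/550
+```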
+
+
diff --git a/docs/extend/add-data-stream.md b/docs/extend/add-data-stream.md
new file mode 100644
index 00000000000..d75ff9381bd
--- /dev/null
+++ b/docs/extend/add-data-stream.md
@@ -0,0 +1,43 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/integrations-developer/current/add-a-data-stream.html
+---
+
+# Add a data stream [add-a-data-stream]
+
+A data stream is a logical sub-division of an integration package, dealing with a specific, observable aspect of the service or product being monitored. For example, the [Apache integration](https://github.com/elastic/integrations/tree/main/packages/apache) has three data streams, each represented by a separate folder of assets in the `data_stream` directory:
+
+```text
+apache
+└───data_stream
+│ └───access
+│ └───error
+│ └───status
+```
+
+::::{admonition}
+**Data streams** allow you to store time series data across multiple indices while giving you a single named resource for requests.
+
+A data stream defines multiple {{es}} assets, like index templates, ingest pipelines, and field definitions. These assets are loaded into {{es}} when a user installs an integration using the {{fleet}} UI in {{kib}}.
+
+A data stream also defines a policy template. Policy templates include variables that allow users to configure the data stream using the {{fleet}} UI in {{kib}}. Then, the {{agent}} interprets the resulting policy to collect relevant information from the product or service being observed. Policy templates can also define an integration’s supported [`deployment_modes`](/extend/define-deployment-modes.md#deployment_modes).
+
+See [data streams](docs-content://reference/ingestion-tools/fleet/data-streams.md) for more information.
+
+::::
+
+
+Bootstrap a new data stream using the TUI wizard. In the directory of your package, run:
+
+```bash
+elastic-package create data-stream
+```
+
+Follow the prompts to name, title, and select your data stream type. Run this command again each time you add a new data stream to your integration.
+
+Next, manually adjust the data stream:
+
+* define required variables (see the sketch below)
+* define used fields
+* define ingest pipeline definitions (if necessary)
+* update the {{agent}}'s stream configuration
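+
+As a sketch of the first adjustment, a data stream's `manifest.yml` declares the variables users can configure through the {{fleet}} UI; the input, names, and default below are illustrative:
+
+```yaml
+title: Apache access logs
+type: logs
+streams:
+  - input: logfile
+    title: Apache access logs
+    description: Collect Apache access logs.
+    vars:
+      - name: paths          # variable surfaced to users in the Fleet UI
+        type: text
+        title: Paths
+        multi: true          # users may supply several paths
+        required: true
+        show_user: true
+        default:
+          - /var/log/apache2/access.log*
+```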
diff --git a/docs/extend/add-mapping.md b/docs/extend/add-mapping.md
new file mode 100644
index 00000000000..435a0a0511a
--- /dev/null
+++ b/docs/extend/add-mapping.md
@@ -0,0 +1,127 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/integrations-developer/current/add-a-mapping.html
+---
+
+# Edit field mappings [add-a-mapping]
+
+Ingest pipelines create fields in an {{es}} index, but don’t define the fields themselves. Instead, each field requires a defined data type or mapping.
+
+::::{admonition}
+**Mapping** is the process of defining how a document, and the fields it contains, are stored and indexed. Each document is a collection of fields, each having its own data type. When mapping your data, create a mapping definition containing a list of fields pertinent to the document. A mapping definition also includes metadata fields, like the _source field, which customize how the associated metadata of a document is handled.
+
+To learn more, see [mapping](docs-content://manage-data/data-store/mapping.md).
+
+::::
+
+
+In the integration, the `fields` directory serves as the blueprint used to create component templates for the integration. The content from all files in this directory will be unified when the integration is built, so the mappings need to be unique per data stream dataset.
+
+Like ingest pipelines, mappings apply only to the data stream dataset; for our example, that's the `apache.access` dataset.
+
+::::{note}
+The names of these files are conventions; any file name with a `.yml` extension will work.
+::::
+
+Integrations have had significant enhancements in how ECS fields are defined. Below is a guide on which approach to use, based on the version of Elastic your integration will support.
+
+1. **ECS mappings component template (>=8.13.0)**: Integrations that **only** support version 8.13.0 and up can use the [ecs@mappings](https://github.com/elastic/elasticsearch/blob/c2a3ec42632b0339387121efdef13f52c6c66848/x-pack/plugin/core/template-resources/src/main/resources/ecs%40mappings.json) component template installed by Fleet. This makes explicitly declaring ECS fields unnecessary; the `ecs@mappings` component template in Elasticsearch will automatically detect and configure them. However, should ECS fields be explicitly defined, they will overwrite the dynamic mapping provided by the `ecs@mappings` component template. They can also be imported with an `external` declaration, as seen in the examples below.
+
+2. **Dynamic mappings imports (<8.13.0 and >=8.13.0)**: Integrations supporting versions of the Elastic stack below 8.13.0 can still dynamically import ECS field mappings by defining `import_mappings: true` in the ECS section of the `_dev/build/build.yml` file in the root of the package directory. This introduces a [dynamic mapping](https://github.com/elastic/elastic-package/blob/f439b96a74c27c5adfc3e7810ad584204bfaf85d/internal/builder/_static/ecs_mappings.yaml) with most of the ECS definitions. Using this method means that, just like the previous approach, ECS fields don't need to be defined in your integration; they are dynamically integrated into the package at build time. Explicitly defined ECS fields can still be used and will overwrite this mechanism.
+
+An example of the aforementioned `build.yml` file for this method:
+
+```yaml
+dependencies:
+ ecs:
+ reference: git@v8.6.0
+ import_mappings: true
+```
+
+3. **Explicit ECS mappings**: As mentioned in the previous two approaches, ECS mappings can still be set explicitly and will overwrite the dynamic mappings. This can be done in two ways:
+
+* Using an `external: ecs` reference to import the definition of a specific field.
+* Literally defining the ECS field.
+
+The `external: ecs` definition instructs the `elastic-package` command line tool to refer to an external ECS reference to resolve specific fields. By default, it looks at the [ECS reference](https://raw.githubusercontent.com/elastic/ecs/v8.6.0/generated/ecs/ecs_nested.yml) file hosted on GitHub. This external reference file is determined by a Git reference found in the `_dev/build/build.yml` file, in the root of the package directory. A `build.yml` file set up for external references:
+
+```yaml
+dependencies:
+ ecs:
+ reference: git@v8.6.0
+```
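+
+With this dependency in place, an entry in `ecs.yml` can import a field definition by reference, for example:
+
+```yaml
+- name: cloud.account.id
+  external: ecs
+```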
+
+A literal definition of an ECS field:
+
+```yaml
+- name: cloud.account.id
+ level: extended
+ type: keyword
+ ignore_above: 1024
+ description: 'The cloud account or organ....'
+ example: 43434343
+```
+
+4. **Local ECS reference file (air-gapped setup)**: By changing the Git reference in `_dev/build/build.yml` to the path of the downloaded [ECS reference](https://raw.githubusercontent.com/elastic/ecs/v8.6.0/generated/ecs/ecs_nested.yml) file, it is possible for the `elastic-package` command line tool to look for this file locally. Note that the path should be the full path to the reference file. With this change, our `build.yml` file looks like:
+
+ ```
+ dependencies:
+ ecs:
+ reference: file:///home/user/integrations/packages/apache/ecs_nested.yml
+ ```
+
+
+The `access` data stream dataset of the Apache integration has four different field definition files:
+
+::::{note}
+The `apache` integration below has not yet been updated to use the dynamic ECS field definition and uses `external` references to define ECS fields in `ecs.yml`.
+::::
+
+```text
+apache
+└───data_stream
+│ └───access
+│ │ └───elasticsearch/ingest_pipeline
+│ │ │ default.yml
+│ │ └───fields
+│ │ agent.yml
+│ │ base-fields.yml
+│ │ ecs.yml
+│ │ fields.yml
+│ └───error
+│ │ └───elasticsearch/ingest_pipeline
+│ │ │ default.yml
+│ │ └───fields
+│ │ agent.yml
+│ │ base-fields.yml
+│ │ ecs.yml
+│ │ fields.yml
+│ └───status
+```
+
+## agent.yml [_agent_yml]
+
+The `agent.yml` file defines fields used by default processors, for example: `cloud.account.id`, `container.id`, `input.type`.
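+
+A sketch of how such fields are commonly declared: ECS fields are imported with `external: ecs` references (reusing the ECS dependency from `_dev/build/build.yml`), while non-ECS fields such as `input.type` are defined literally:
+
+```yaml
+- name: cloud.account.id
+  external: ecs            # resolved from the ECS reference at build time
+- name: container.id
+  external: ecs
+- name: input.type         # not an ECS field, so it is defined literally
+  type: keyword
+  description: Input type.
+```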
+
+
+## base-fields.yml [_base_fields_yml]
+
+In this file, the `data_stream` subfields `type`, `dataset`, and `namespace` are defined as type `constant_keyword`; the values for these fields are added by the integration. The `event.module` and `event.dataset` fields are defined with a fixed value specific to this integration:
+
+* `event.module: apache`
+* `event.dataset: apache.access`
+
+The `@timestamp` field is defined here as type `date`.
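+
+A sketch of a typical `base-fields.yml` for this dataset, following the conventions just described:
+
+```yaml
+- name: data_stream.type
+  type: constant_keyword
+  description: Data stream type.
+- name: data_stream.dataset
+  type: constant_keyword
+  description: Data stream dataset.
+- name: data_stream.namespace
+  type: constant_keyword
+  description: Data stream namespace.
+- name: event.module
+  type: constant_keyword
+  description: Event module.
+  value: apache            # fixed value for this integration
+- name: event.dataset
+  type: constant_keyword
+  description: Event dataset.
+  value: apache.access     # fixed value for this data stream dataset
+- name: '@timestamp'
+  type: date
+  description: Event timestamp.
+```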
+
+
+## fields.yml [_fields_yml]
+
+Here we define fields that we need in our integration and that are not part of ECS. The example below defines the field `apache.access.ssl.protocol` in the Apache integration.
+
+```yaml
+- name: apache.access
+ type: group
+ fields:
+ - name: ssl.protocol
+ type: keyword
+ description: |
+ SSL protocol version.
+```
+
+Learn more about fields in the [general guidelines](/extend/general-guidelines.md#_document_all_fields).
diff --git a/docs/extend/asset-testing.md b/docs/extend/asset-testing.md
new file mode 100644
index 00000000000..999ea668bff
--- /dev/null
+++ b/docs/extend/asset-testing.md
@@ -0,0 +1,64 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/integrations-developer/current/asset-testing.html
+---
+
+# Asset testing [asset-testing]
+
+Elastic Packages define assets to be loaded into {{es}} and {{kib}}. Asset loading tests exercise installing a package to ensure that its assets are loaded into {{es}} and {{kib}} as expected.
+
+
+## Conceptual process [asset-testing-concepts]
+
+Conceptually, running an asset load test involves the following steps:
+
+1. Build the package.
+2. Deploy {{es}}, {{kib}}, and the {{package-registry}} (all of which are part of the {{stack}}). This step takes time, so you should typically do it once as a prerequisite to running asset loading tests on multiple packages.
+3. Install the package.
+4. Use various {{kib}} and {{es}} APIs to confirm that the package assets were loaded into {{kib}} and {{es}} as expected.
+5. Remove the package.
+
+
+## Define an asset loading test [define-asset-test]
+
+As a package developer, you don't need to do any work to define an asset loading test for your package. All the necessary information is contained in the package files.
+
+
+## Run an asset loading test [running-asset-test]
+
+First, you must build your package. This step corresponds to step 1 in the [Conceptual process](#asset-testing-concepts) section.
+
+Navigate to the root folder of the package, or any sub-folder under it, and run the following command.
+
+```bash
+elastic-package build
+```
+
+Next, deploy {{es}}, {{kib}}, and the {{package-registry}}. This step corresponds to step 2 in the [Conceptual process](#asset-testing-concepts) section.
+
+```bash
+elastic-package stack up -d
+```
+
+To view a list of the available options for this command, run `elastic-package stack up -h` or `elastic-package help stack up`.
+
+Next, set the environment variables that are required for additional `elastic-package` commands.
+
+```bash
+$(elastic-package stack shellinit)
+```
+
+Next, invoke the asset loading test runner. This step corresponds to steps 3 to 5 in the [Conceptual process](#asset-testing-concepts) section.
+
+Navigate to the root folder of the package, or any sub-folder under it, and run the following command.
+
+```bash
+elastic-package test asset
+```
+
+Finally, when all the asset loading tests have completed, bring down the {{stack}}.
+
+```bash
+elastic-package stack down
+```
+
diff --git a/docs/extend/build-create-package.md b/docs/extend/build-create-package.md
new file mode 100644
index 00000000000..64fe6d9c550
--- /dev/null
+++ b/docs/extend/build-create-package.md
@@ -0,0 +1,23 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/integrations-developer/current/build-create-package.html
+---
+
+# Create a new package [build-create-package]
+
+Rather than copying the source of an existing package, we recommend using the `elastic-package create` command to build a new package. Running this command ensures that your integration follows the latest recommendations for the package format.
+
+Use the `elastic-package` TUI wizard to bootstrap a new package:
+
+```bash
+elastic-package create package
+```
+
+The wizard walks you through the creation of the package, including setting a package name, version, category, etc. When the wizard completes, you’ll have a basic package complete with a sample manifest, changelog, documentation, and screenshot.
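+
+For instance, the generated skeleton looks roughly like the following; exact contents may vary with the `elastic-package` version:
+
+```text
+my_integration
+└───changelog.yml
+└───manifest.yml
+└───docs
+│   └───README.md
+└───img
+```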
+
+::::{note}
+It may not do anything yet, but your integration can be built and loaded into your locally running package registry from this step forward. Jump to [Build](/extend/build-it.md) at any point in this documentation to take your integration for a test run.
+
+::::
+
+
diff --git a/docs/extend/build-it.md b/docs/extend/build-it.md
new file mode 100644
index 00000000000..744336e26db
--- /dev/null
+++ b/docs/extend/build-it.md
@@ -0,0 +1,25 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/integrations-developer/current/build-it.html
+---
+
+# Build [build-it]
+
+To format, lint, and build your integration, in that order, run:
+
+```bash
+elastic-package check
+```
+
+Problems and potential solutions will display in the console. Fix them and rerun the command. Alternatively, skip formatting and linting with the `build` command:
+
+```bash
+elastic-package build
+```
+
+With the package built, run the following command from inside the integration directory to recycle the package-registry Docker container. This refreshes the {{fleet}} UI, allowing it to pick up the new integration in {{kib}}.
+
+```bash
+elastic-package stack up --services package-registry
+```
+
diff --git a/docs/extend/build-new-integration.md b/docs/extend/build-new-integration.md
new file mode 100644
index 00000000000..fec8cf7ab1c
--- /dev/null
+++ b/docs/extend/build-new-integration.md
@@ -0,0 +1,38 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/integrations-developer/current/build-a-new-integration.html
+---
+
+# Build an integration [build-a-new-integration]
+
+Ready to monitor, ingest, and visualize something? Let’s get started.
+
+* [Overview and prerequisites](/extend/build-overview.md)
+* [Spin up the {{stack}}](/extend/build-spin-stack.md)
+* [Create a new package](/extend/build-create-package.md)
+* [Add a data stream](/extend/add-data-stream.md)
+* [Define deployment modes](/extend/define-deployment-modes.md)
+* [Edit ingest pipelines](/extend/edit-ingest-pipeline.md)
+* [Edit field mappings](/extend/add-mapping.md)
+* [Create and export dashboards](/extend/create-dashboards.md)
+* [Testing and validation](/extend/testing-validation.md)
+* [Finishing touches](/extend/finishing-touches.md)
+* [Tips for building integrations](/extend/tips-for-building.md)
+
+::::{tip}
+Familiar with the {{stack}} and just want a quick way to get started? See [*Quick start: Sample integration*](/extend/quick-start.md).
+::::
+
diff --git a/docs/extend/build-overview.md b/docs/extend/build-overview.md
new file mode 100644
index 00000000000..4ede5bc9b12
--- /dev/null
+++ b/docs/extend/build-overview.md
@@ -0,0 +1,14 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/integrations-developer/current/build-overview.html
+---
+
+# Overview and prerequisites [build-overview]
+
+Before building an integration, you should have an understanding of the following:
+
+* {{stack}} concepts, like data streams, ingest pipelines, and mappings
+* The [*Package specification*](/extend/package-spec.md)
+
+In addition, you must have [`elastic-package`](/extend/elastic-package.md) installed on your machine. Using `elastic-package` is recommended for integration maintainers as it provides crucial utilities and scripts for building out integrations.
+
diff --git a/docs/extend/build-spin-stack.md b/docs/extend/build-spin-stack.md
new file mode 100644
index 00000000000..c5774f444dd
--- /dev/null
+++ b/docs/extend/build-spin-stack.md
@@ -0,0 +1,31 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/integrations-developer/current/build-spin-stack.html
+---
+
+# Spin up the Elastic Stack [build-spin-stack]
+
+The [`elastic-package`](/extend/elastic-package.md) tool provides a quick way to spin up the {{stack}}. The following command deploys {{es}}, {{kib}}, and the {{package-registry}}:
+
+```bash
+elastic-package stack up -v -d
+```
+
+To view a list of the available options for this command, run:
+
+```bash
+elastic-package stack up -h
+```
+
+When complete, go to [http://localhost:5601](http://localhost:5601) and log in with the username `elastic` and the password `changeme`.
+
+::::{tip}
+Development time over? Tear down the {{stack}} with:
+
+```bash
+elastic-package stack down
+```
+
+::::
+
+
diff --git a/docs/extend/changelog-spec.md b/docs/extend/changelog-spec.md
new file mode 100644
index 00000000000..334d9440834
--- /dev/null
+++ b/docs/extend/changelog-spec.md
@@ -0,0 +1,59 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/integrations-developer/current/changelog-spec.html
+---
+
+# changelog.yml [changelog-spec]
+
+The integration’s changelog.
+
+**required**
+
+Included from the package-spec repository. This will update when the spec is updated.
+
+```yaml
+##
+## Describes the specification for the package's CHANGELOG file
+##
+spec:
+ # Everything under here follows JSON schema (https://json-schema.org/), written as YAML for readability
+ type: array
+ items:
+ type: object
+ additionalProperties: false
+ properties:
+ version:
+ description: Package version.
+ $ref: "./manifest.spec.yml#/definitions/version"
+ changes:
+ description: List of changes in package version.
+ type: array
+ items:
+ type: object
+ additionalProperties: false
+ properties:
+ description:
+ description: Description of change.
+ type: string
+ examples:
+ - "Fix broken template"
+ type:
+ description: Type of change.
+ type: string
+ enum:
+ - "breaking-change"
+ - "bugfix"
+ - "enhancement"
+ link:
+ description: Link to issue or PR describing change in detail.
+ type: string
+ examples:
+ - "https://github.com/elastic/integrations/pull/550"
+ required:
+ - description
+ - type
+ - link
+ required:
+ - version
+ - changes
+```
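+
+For reference, a `changelog.yml` conforming to this spec might look like the following (values are illustrative, drawn from the examples above):
+
+```yaml
+- version: "1.0.1"
+  changes:
+    - description: Fix broken template
+      type: bugfix
+      link: https://github.com/elastic/integrations/pull/550
+- version: "1.0.0"
+  changes:
+    - description: Initial release
+      type: enhancement
+      link: https://github.com/elastic/integrations/pull/550
+```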
diff --git a/docs/extend/create-dashboards.md b/docs/extend/create-dashboards.md
new file mode 100644
index 00000000000..7462eef690a
--- /dev/null
+++ b/docs/extend/create-dashboards.md
@@ -0,0 +1,123 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/integrations-developer/current/create-dashboards.html
+---
+
+# Create and export dashboards [create-dashboards]
+
+Visualizing integration data in a meaningful way is an important aspect of an integration.
+
+When creating a new integration, it’s important to add dashboards.
+
+To get started, create a new dashboard, or customize an existing one. You can use `elastic-package` to boot up the service stack. Navigate to the package you want to create dashboards for, and run:
+
+```bash
+elastic-package service
+```
+
+When you’re done making changes, you can use `elastic-package` to export the dashboards and their dependencies to the package source.
+
+
+## Dashboard planning [_dashboard_planning]
+
+Many integrations cover more than one component of a target system. For example, the RabbitMQ module provides several metricsets covering connections, exchanges, nodes, and queues. It makes sense to break this information down into several interconnected dashboards: the default one is an overview of the target system, and the others provide deep dives into its various parts. The content of the overview dashboard should be cherry-picked from all datasets and individually compiled for every such integration.
+
+
+### Metrics [_metrics]
+
+Always check the type of a metric and ensure that the correct transformation is applied where applicable. For example, in most cases for cumulative counters, it makes sense to use the rate function.
+
+
+### Visualization type [_visualization_type]
+
+For new visualizations, we recommend using Lens first. If what you’re trying to achieve cannot be accomplished with the current capabilities of Lens, try TSVB.
+
+
+### Filters [_filters]
+
+When building a dashboard, always consider adding a filter dropdown. Why? In most cases, the integrations monitor multiple instances of a target system, so we need to provide a way to switch between them.
+
+To build a filter dropdown, use the Controls visualization. For example, you can add a host name dropdown to the System dashboard.
+
+
+### Navigation [_navigation]
+
+If an integration has several dashboards, ensure that you can easily navigate all of them. To build dashboard navigation, use the Markdown visualization type.
+
+For example, the System dashboard provides the following navigation:
+
+Source:
+
+```text
+[System Overview](#/dashboard/system-Metrics-system-overview-ecs) | [Host Overview](#/dashboard/system-79ffd6e0-faa0-11e6-947f-177f697178b8-ecs) |
+[Containers overview](#/dashboard/system-CPU-slash-Memory-per-container-ecs)
+```
+
+While this can work, it doesn't highlight the selected dashboard. Unfortunately, the Markdown control is not optimized for navigation, which makes it cumbersome to build navigation with highlighted links: each link must be highlighted separately, so the navigation control has to be cloned as many times as there are dashboards to ensure proper link highlighting. For example:
+
+```text
+**[System Overview](#/dashboard/system-Metrics-system-overview-ecs)** | [Host Overview](#/dashboard/system-79ffd6e0-faa0-11e6-947f-177f697178b8-ecs) |
+[Containers overview](#/dashboard/system-CPU-slash-Memory-per-container-ecs)
+
+[System Overview](#/dashboard/system-Metrics-system-overview-ecs) | **[Host Overview](#/dashboard/system-79ffd6e0-faa0-11e6-947f-177f697178b8-ecs)** |
+[Containers overview](#/dashboard/system-CPU-slash-Memory-per-container-ecs)
+
+[System Overview](#/dashboard/system-Metrics-system-overview-ecs) | [Host Overview](#/dashboard/system-79ffd6e0-faa0-11e6-947f-177f697178b8-ecs) |
+**[Containers overview](#/dashboard/system-CPU-slash-Memory-per-container-ecs)**
+```
+
+
+### Target system name [_target_system_name]
+
+Currently we don’t make it a rule to show on a dashboard what system it’s designed to monitor. The only way to see it is through the dashboard name.
+
+When using multiple dashboards on bigger screens, it can be hard to distinguish between them. You can improve this by using the Markdown control to display the target system the dashboard monitors.
+
+
+### Naming [_naming]
+
+When building dashboards, use the following naming convention.
+
+
+#### Visualizations [_visualizations]
+
+```text
+<Visualization name> [<Data type> <Integration name>]
+```
+
+Examples:
+
+* Memory Usage Gauge [Metrics System]
+* New groups [Logs System]
+
+Rename all visualizations added to a dashboard to show only the `<Visualization name>` part.
+
+
+#### Dashboards [_dashboards]
+
+```text
+[<Data type> <Integration name>] <Dashboard name>
+```
+
+Examples:
+
+* [Metrics System] Host overview
+* [Metrics MongoDB] Overview
+
+
+### Screenshots [_screenshots]
+
+Letter casing is important for screenshot descriptions. Descriptions are shown in the {{kib}} UI, so try to keep them clean and consistent.
+
+* Bad candidate: filebeat running on ec2 machine
+* Good candidate: {{filebeat}} running on AWS EC2 machine
+
+
+## Exporting [_exporting]
+
+To export the dashboards and their dependencies from the running {{kib}} to the package source, run:
+
+```bash
+elastic-package export dashboards
+```
+
diff --git a/docs/extend/dashboard-guidelines.md b/docs/extend/dashboard-guidelines.md
new file mode 100644
index 00000000000..6bcd351beeb
--- /dev/null
+++ b/docs/extend/dashboard-guidelines.md
@@ -0,0 +1,159 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/integrations-developer/current/dashboard-guidelines.html
+---
+
+# Dashboard guidelines [dashboard-guidelines]
+
+A [Kibana dashboard](docs-content://explore-analyze/dashboards.md) is a set of one or more panels, also referred to as visualizations. Panels display data in charts, tables, maps, and more. Dashboards support several types of panels to display your data, and several options to create panels.
+
+The goal of each integration dashboard is to:
+
+* Provide a way to explore ingested data out of the box.
+* Provide an overview of the monitored resources through installing the integration.
+
+Each integration package should contain one or more dashboards.
+
+
+## Dashboard Best Practices [_dashboard_best_practices]
+
+Following are recommended best practices for designing Kibana dashboards.
+
+
+### Build dashboards on stable versions [_build_dashboards_on_stable_versions]
+
+Avoid building dashboards on SNAPSHOT versions because, until a release is stable, behavior changes might render your dashboard unusable. The only supported approach is to use a globally released version from the [official releases list](https://www.elastic.co/downloads/past-releases#kibana).
+
+
+### Not too many visualizations per dashboard [_not_too_many_visualizations_per_dashboard]
+
+Include only necessary visualizations inside a dashboard, and, when possible, split them across separate dashboards. To link between dashboards:
+
+* Use a Markdown visualization to improve performance.
+* Use [drilldowns](docs-content://explore-analyze/dashboards/drilldowns.md) to connect dashboards where they make sense.
+
+
+### Out of date fields in dashboards [_out_of_date_fields_in_dashboards]
+
+The dashboards must be updated to reflect any changes to field names or types. If a pull request updates a field name or type, make sure it is correctly updated in any dashboard the field is being used in.
+
+
+### Add visualizations by value, not by reference [_add_visualizations_by_value_not_by_reference]
+
+Kibana visualizations can be added to a dashboard by value or by reference. Historically, adding by value did not exist. Adding by value has the advantage that the dashboards are fully self-contained and only need a single request to be installed.
+
+To achieve this:
+
+* Migrate existing dashboards from `by reference` to `by value`.
+* Create new dashboards adding visualizations by value.
+
+A migration script is available to help with the migration: [flash1293/legacy_vis_analyzer](https://github.com/elastic/visualizations_integrations_tools)
+
+
+### Choose the context of your Dashboard [_choose_the_context_of_your_dashboard]
+
+You should always try to understand as much as possible what kind of context your users need to interact with the dashboard. Keep the minimal context needed by answering the following questions:
+
+* Who is going to use this dashboard?
+* How much time will the users have?
+* What is the main goal of this dashboard and what are any secondary goals?
+* What kind of charts can help users identify insights in the most immediate and clear way?
+
+
+### Organization and hierarchy matter in your dashboards [_organisation_and_hierarchy_matters_in_your_dashboards]
+
+Keep the following guidelines in mind when positioning your elements on dashboards:
+
+* Keep related visualizations close to each other.
+
+ :::{image} ../images/grouping-in-visualizations.png
+ :alt: Closely grouped visualizations
+ :::
+
+* Use Markdown to create blocks of related content.
+
+ :::{image} ../images/markdown-grouping.png
+ :alt: Markdown grouping in visualizations
+ :::
+
+* Reading Direction
+
+  Most people are used to reading from top to bottom. Place the most important charts at the top of your page, along with the ones that give a brief and immediate summary of the context. A good general guideline is to increase the level of detail as you approach the bottom of the dashboard. This way, users who want all the information can obtain it without too much effort, while other users can gather what they need from a quick glance at the topmost visualizations.
+
+* Central focal point
+
+ Placing a big chart at the center of a dashboard, especially one with prominent visual shapes such as rectangles, helps to reinforce a natural visual focal point that lies in the center of the interface.
+
+ :::{image} ../images/rows-in-visualizations.png
+ :alt: Central focal point in visualization
+ :::
+
+
+
+### Use Margins [_use_margins]
+
+Kibana dashboards offer the possibility to apply margins between visualizations, and this is highly recommended. Margins create separation between charts, which is an important visual feature, and they help users to identify when two elements belong together. At the same time, the added space makes the interface appear more clean and elegant.
+
+
+## Visualization Best Practices [_visualization_best_practices]
+
+Following are recommended best practices for designing Kibana visualizations.
+
+
+### Lens vs TSVB visualizations [_lens_vs_tsvb_visualizations]
+
+**Always use Lens**, when possible. It’s the best choice for staying consistent and up to date.
+
+When possible, migrate dashboards from TSVB to Lens. If it’s not possible, please engage with the Kibana team to identify any gaps that prevent full TSVB to Lens dashboard migration.
+
+
+### Visualizations should contain a filter [_visualizations_should_contain_a_filter]
+
+Kibana visualizations can define a filter to avoid performance issues when querying all metrics (`metrics-*`) or logs (`logs-*`) indices.
+
+It is recommended to set a filter in each visualization at least on the required `data_stream.dataset`. For more details, refer to the [Elastic data stream naming scheme](https://www.elastic.co/blog/an-introduction-to-the-elastic-data-stream-naming-scheme).
+
+As much as possible, avoid general filters, that is, filters with `-*`. Combine multiple fields and values inside a filter with AND/OR operators. Although your filter might become more complex, it avoids extra queries (see the KQL sketch below).
+
+Example:
+
+:::{image} ../images/filter-in-visualization.png
+:alt: Filter in a visualization
+:::
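+
+In KQL, such a filter might look like the following sketch (the dataset names are illustrative):
+
+```text
+data_stream.dataset : ("nginx.access" or "nginx.error") and data_stream.type : "logs"
+```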
+
+
+### Do not use library visualizations [_do_not_use_library_visualizations]
+
+Do not use the visualizations that appear in **Analytics > Visualize library**. Instead, define visualizations as part of the dashboard. This is the default when creating new panels by clicking **Add new visualization** on the dashboard. If some panels are already saved to the library, you can unlink them and delete them from the library.
+
+There are some cases where library visualizations are preferable. It makes sense, for example, if a given visualization always has to be exactly the same on multiple dashboards or if its users frequently look at the visualization without looking at the whole dashboard.
+
+
+### Use dashboard-native controls [_use_dashboard_native_controls]
+
+The **Input controls** visualization type is deprecated in favor of **Controls** embedded into the dashboard itself. The **Controls** dropdown in the Dashboard menu bar should be used. Refer to [Filter dashboard data with controls](docs-content://explore-analyze/dashboards/add-controls.md) for more information.
+
+
+### Keep Consistent Color [_keep_consistent_color]
+
+Use color to distinguish categories, represent quantity/density, and highlight data. When using color in this way, be aware that too many colors in a single chart can create noise and hinder quick comprehension.
+
+[Elastic UI](https://elastic.github.io/eui/#/elastic-charts/creating-charts) provides guidance for correct color choice. The colors provided there for visualization have been tested for accessibility contrast. By using them, you can be sure you properly serve the largest possible audience.
+
+If your dashboard is made to identify specific behaviors, consider a color scheme that helps point them out. Use a neutral color for generic elements and an accented color for the things that you want to highlight.
+
+:::{image} ../images/colors-in-visualizations.png
+:alt: Colors in visualizations
+:::
+
+
+### Titles in visualizations matter [_titles_in_visualisations_matter]
+
+Titles can have a strong visual impact on dashboards, especially when there are a lot of small charts. Two principles can generally be followed:
+
+* Remove unnecessary or repetitive titles when the information is already explained or written within the chart.
+* When a title is needed, make it self-explanatory and exhaustive. This way, you will be able to remove axis titles and other specifications, leaving more space for the chart itself.
+
+:::{image} ../images/titles-in-visualizations.png
+:alt: Titles in visualizations
+:::
diff --git a/docs/extend/data-stream-spec.md b/docs/extend/data-stream-spec.md
new file mode 100644
index 00000000000..11ff3ec2e27
--- /dev/null
+++ b/docs/extend/data-stream-spec.md
@@ -0,0 +1,128 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/integrations-developer/current/data-stream-spec.html
+---
+
+# data_stream [data-stream-spec]
+
+Data stream assets, including ingest pipelines, field definitions, metadata, and sample events.
+
+**required**
+
+Included from the package-spec repository. This will update when the spec is updated.
+
+```yaml
+spec:
+ additionalContents: false
+ totalContentsLimit: 500
+ contents:
+ - description: Folder containing a single data stream definition
+ type: folder
+ pattern: '^([a-z0-9]{2}|[a-z0-9][a-z0-9_]+[a-z0-9])$'
+ forbiddenPatterns:
+ # Avoid collision with ingest pipeline created by fleet, see https://github.com/elastic/package-spec/issues/699
+ - '^integration$'
+ required: true
+ additionalContents: false
+ contents:
+ - description: A data stream's manifest file
+ type: file
+ contentMediaType: "application/x-yaml"
+ sizeLimit: 5MB
+ name: "manifest.yml"
+ required: true
+ $ref: "./manifest.spec.yml"
+ - description: Folder containing field definitions
+ type: folder
+ name: fields
+ required: true
+ $ref: "./fields/spec.yml"
+ - description: Folder containing agent-related definitions
+ type: folder
+ name: agent
+ required: false
+ additionalContents: false
+ $ref: "./agent/spec.yml"
+ - description: Folder containing Elasticsearch assets
+ type: folder
+ name: elasticsearch
+ additionalContents: false
+ contents:
+ - description: Folder containing Elasticsearch ILM Policy Definition
+ type: folder
+ name: ilm
+ additionalContents: false
+ contents:
+ - description: Supporting ILM policy definitions in YAML
+ type: file
+ pattern: '^.+\.yml$'
+ # TODO Determine if special handling of `---` is required (issue: https://github.com/elastic/package-spec/pull/54)
+ contentMediaType: "application/x-yaml; require-document-dashes=true"
+ required: false
+ - description: Supporting ILM policy definitions in JSON
+ type: file
+ pattern: '^.+\.json$'
+ contentMediaType: "application/json"
+ required: false
+ - description: Folder containing Elasticsearch Ingest Node pipeline definitions
+ type: folder
+ name: ingest_pipeline
+ additionalContents: false
+ contents:
+ - description: Supporting ingest pipeline definitions in YAML
+ type: file
+ pattern: '^.+\.yml$'
+ # TODO Determine if special handling of `---` is required (issue: https://github.com/elastic/package-spec/pull/54)
+ contentMediaType: "application/x-yaml; require-document-dashes=true"
+ required: false
+ $ref: "../../integration/elasticsearch/pipeline.spec.yml"
+ - description: Supporting ingest pipeline definitions in JSON
+ type: file
+ pattern: '^.+\.json$'
+ contentMediaType: "application/json"
+ required: false
+ $ref: "../../integration/elasticsearch/pipeline.spec.yml"
+ - description: Sample event file
+ type: file
+ name: "sample_event.json"
+ contentMediaType: "application/json"
+ required: false
+ - description: Folder containing testing related files and sub-folders
+ type: folder
+ name: "test"
+ required: false
+ - description: Folder containing development resources
+ type: folder
+ name: _dev
+ required: false
+ visibility: private
+ $ref: "./_dev/spec.yml"
+ - description: File containing routing rules definitions (technical preview)
+ type: file
+ contentMediaType: "application/x-yaml"
+ name: "routing_rules.yml"
+ required: false
+ $ref: "./routing_rules.spec.yml"
+ - description: File containing lifecycle configuration (technical preview)
+ type: file
+ contentMediaType: "application/x-yaml"
+ name: "lifecycle.yml"
+ required: false
+ $ref: "lifecycle.spec.yml"
+
+versions:
+ - before: 3.0.0
+ patch:
+ - op: remove
+ path: "/contents/0/contents/3/contents/1/contents/0/$ref" # remove ingest pipeline validation as yaml
+ - op: remove
+ path: "/contents/0/contents/3/contents/1/contents/1/$ref" # remove ingest pipeline validation as json
+ - before: 2.10.0
+ patch:
+ - op: remove
+ path: "/contents/0/contents/8" # remove lifecycle definition
+ - before: 2.9.0
+ patch:
+ - op: remove
+ path: "/contents/0/contents/7" # remove routing_rules file definition
+```
diff --git a/docs/extend/define-deployment-modes.md b/docs/extend/define-deployment-modes.md
new file mode 100644
index 00000000000..8d3e2e4fa05
--- /dev/null
+++ b/docs/extend/define-deployment-modes.md
@@ -0,0 +1,89 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/integrations-developer/current/define-deployment-modes.html
+---
+
+# Define deployment modes [define-deployment-modes]
+
+Some integrations can be deployed on fully managed agents. These integrations are known as "agentless" integrations. Define the deployment mode of an integration with the [`deployment_modes`](#deployment_modes) property and display/hide variables in different deployment modes with the [`hide_in_deployment_modes`](#hide_in_deployment_modes) property.
+
+
+## `deployment_modes` [deployment_modes]
+
+Policy templates can indicate which deployment modes they support. Use the `deployment_modes` property in the policy template schema to define the supported deployment modes. Options are `default` and `agentless`. A policy template can support both modes.
+
+Example policy template declaration:
+
+```yaml
+format_version: 3.2.0
+name: aws
+title: AWS
+version: 2.13.1
+...
+policy_templates:
+ - name: billing
+ title: AWS Billing
+ description: Collect billing metrics with Elastic Agent
+ deployment_modes: <1>
+ default:
+ enabled: false <2>
+ agentless:
+ enabled: true <3>
+ data_streams:
+ - billing
+ ...
+```
+
+1. Defines the supported deployment modes
+2. Disables agent deployment support
+3. Enables agentless deployment support
+
+
+
+## `hide_in_deployment_modes` [hide_in_deployment_modes]
+
+Variables can be hidden in certain deployment modes. Use the `hide_in_deployment_modes` property to opt variables in or out of being displayed in default or agentless mode. This property works at any manifest level.
+
+Example variable declaration:
+
+```yaml
+streams:
+ - input: filestream
+ vars:
+ - name: paths
+ type: text
+ title: Paths
+ multi: true
+ required: true
+ show_user: true
+ default:
+ - /var/log/my-package/*.log
+ - name: agentless_only
+ type: text
+ title: Agentless only variable
+ multi: false
+ required: false
+ show_user: true
+ hide_in_deployment_modes: <1>
+ - default
+ - name: hidden_in_agentless
+ type: text
+ title: Hidden in agentless variable
+ multi: false
+ required: false
+ show_user: true
+ hide_in_deployment_modes: <2>
+ - agentless
+```
+
+1. Disables visibility of the variable in agent deployment mode
+2. Disables visibility of the variable in agentless deployment mode
+
+
+For more information on variable property definitions, refer to [Define variable properties](/extend/finishing-touches.md#define-variable-properties).
+
+
+## Agentless capabilities [agentless-capabilities]
+
+The capabilities feature protects agentless deployments from running undesired inputs. A static `capabilities.yml` file defines the allowed and disallowed inputs and is passed to deployed agents. To determine which capabilities are currently allowed on Agentless, refer to [`capabilities.yml`](https://github.com/elastic/agentless-controller/blob/main/controllers/config/capabilities.yml).
+
diff --git a/docs/extend/dev-spec.md b/docs/extend/dev-spec.md
new file mode 100644
index 00000000000..033912d203e
--- /dev/null
+++ b/docs/extend/dev-spec.md
@@ -0,0 +1,39 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/integrations-developer/current/dev-spec.html
+---
+
+# _dev [dev-spec]
+
+Development resources.
+
+**required**
+
+Included from the package-spec repository. This will update when the spec is updated.
+
+```yaml
+spec:
+ additionalContents: false
+ developmentFolder: true
+ contents:
+ - description: Folder containing resources related to package benchmarks.
+ type: folder
+ name: benchmark
+ required: false
+ $ref: "./benchmark/spec.yml"
+ - description: Folder containing resources related to building the package.
+ type: folder
+ name: build
+ required: false
+ $ref: "./build/spec.yml"
+ - description: Folder containing configuration related to deploying the package's service(s) required for testing scenarios.
+ type: folder
+ name: deploy
+ required: false
+ $ref: "./deploy/spec.yml"
+  - description: Folder containing test configuration.
+ type: folder
+ name: test
+ required: false
+ $ref: "./test/spec.yml"
+```
diff --git a/docs/extend/developer-tsds-guidelines.md b/docs/extend/developer-tsds-guidelines.md
new file mode 100644
index 00000000000..d956b2eeb31
--- /dev/null
+++ b/docs/extend/developer-tsds-guidelines.md
@@ -0,0 +1,214 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/integrations-developer/current/developer-tsds-guidelines.html
+---
+
+# TSDS guidelines [developer-tsds-guidelines]
+
+This page describes how to enable TSDS functionality in your integration packages. Full details about TSDS can be found in [Time series data stream](docs-content://manage-data/data-store/data-streams/time-series-data-stream-tsds.md) in the {{es}} documentation.
+
+In this document you can find:
+
+* [Background](#integrations-dev-tsds-background)
+* [Steps for enabling TSDS for a metrics dataset](#integrations-dev-tsds-migrating)
+* [Testing](#integrations-dev-tsds-testing)
+* [Best practices](#integrations-dev-tsds-best-practices)
+* [Troubleshooting](#integrations-dev-tsds-troubleshooting)
+
+
+## Background [integrations-dev-tsds-background]
+
+A time series is a sequence of observations for a specific entity. TSDS enables column-oriented functionality in Elasticsearch by co-locating the data and optimizing storage and aggregations to take advantage of that co-location.
+
+Integrations are one of the biggest sources of input data to Elasticsearch. Enabling TSDS on integration packages requires only minimal changes to the `fields.yml` and `manifest.yml` files of a package.
+
+
+## Steps for enabling TSDS for a metrics dataset [integrations-dev-tsds-migrating]
+
+::::{important}
+Data streams of type `logs` are excluded from TSDS migration.
+::::
+
+
+
+## Step 1: Set the dimension fields [_step_1_set_the_dimension_fields]
+
+Each field belonging to the set of fields that uniquely identify a document is a dimension. For more details, refer to [Dimensions](docs-content://manage-data/data-store/data-streams/time-series-data-stream-tsds.md#time-series-dimension).
+
+To set a field as a dimension, simply add `dimension: true` to its mapping:
+
+```yaml
+- name: ApiId
+ type: keyword
+ dimension: true
+```
+
+::::{note}
+A field of type [flattened](elasticsearch://docs/reference/elasticsearch/mapping-reference/flattened.md) cannot be selected as a dimension field. If the field that you are choosing as a dimension is too long or is of type flattened, consider hashing its value and using the result as a dimension. The [Fingerprint processor](elasticsearch://docs/reference/ingestion-tools/enrich-processor/fingerprint-processor.md) can be used for this purpose.
+
+You can find an example in [Oracle Integration TSDS Enablement Example](https://github.com/elastic/integrations/blob/8a57d6ba96d391afc33da20c80ec51280d22f009/packages/oracle/data_stream/performance/elasticsearch/ingest_pipeline/default.yml#LL127C4-L131C29)
+
+::::
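+
+As a rough sketch, such a hashing step in an ingest pipeline could look like the following (the field names are illustrative):
+
+```yaml
+processors:
+  - fingerprint:
+      fields: ["sql_fulltext"]
+      target_field: "sql_fulltext_hash"
+      ignore_missing: true
+```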
+
+
+Important considerations:
+
+* There is a limit on how many dimension fields a data stream can have. By default, this value is `21`. You can adjust this restriction by altering the `index.mapping.dimension_fields.limit` setting:
+
+ ```yaml
+ elasticsearch:
+ index_template:
+ settings:
+ index.mapping.dimension_fields.limit: 32 # Defaults to 21
+ ```
+
+* Dimension keys have a hard limit of 512 bytes. Documents are rejected if this limit is reached.
+* Dimension values have a hard limit of 1024 bytes. Documents are rejected if this limit is reached.
+
+
+### ECS fields [_ecs_fields]
+
+There are fields that are part of every package, and they are potential candidates for becoming dimension fields:
+
+* `host.name`
+* `service.address`
+* `agent.id`
+* `container.id`
+
+For products that are capable of running both on-premises and in a public cloud environment (by being deployed on public cloud virtual machines), it is recommended to annotate the ECS fields listed below as dimension fields:
+
+* `host.name`
+* `service.address`
+* `container.id`
+* `cloud.account.id`
+* `cloud.provider`
+* `cloud.region`
+* `cloud.availability_zone`
+* `agent.id`
+* `cloud.instance.id`
+
+For products operating as managed services within cloud providers like AWS, Azure, and GCP, it is advised to label the fields listed below as dimension fields:
+
+* `cloud.account.id`
+* `cloud.region`
+* `cloud.availability_zone`
+* `cloud.provider`
+* `agent.id`
+
+Note that for some packages, some of these fields do not hold any value, so make sure to use only the fields that are needed.
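+
+Marking one of these ECS fields as a dimension is a one-line addition in the package’s fields file. A minimal sketch, assuming the field is imported via `external: ecs`:
+
+```yaml
+- name: cloud.account.id
+  external: ecs
+  dimension: true
+```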
+
+
+### Integration specific fields [_integration_specific_fields]
+
+The `fields.yml` file has the field mappings specific to a data stream of an integration. Some of these fields might need to be set as a dimension if the set of dimension fields in ECS is not enough to create a unique [`_tsid`](docs-content://manage-data/data-store/data-streams/time-series-data-stream-tsds.md#tsid).
+
+Adding an inline comment prior to the dimension annotation is advised, detailing the rationale behind the choice of a particular field as a dimension field:
+
+```yaml
+- name: wait_class
+ type: keyword
+ # Multiple events are generated based on the values of wait_class. Hence, it is a dimension
+ dimension: true
+ description: Every wait event belongs to a class of wait events.
+```
+
+
+## Step 2: Set type for metric fields [_step_2_set_type_for_metric_fields]
+
+Metrics are fields that contain numeric measurements, as well as aggregated and downsampled values based on those measurements. Annotate each metric with the correct metric type. The [currently supported values](docs-content://manage-data/data-store/data-streams/time-series-data-stream-tsds.md#time-series-metric) are `gauge`, `counter`, and `null`.
+
+Example of adding a metric type to a field:
+
+```yaml
+- name: compactions_failed
+ type: double
+ metric_type: counter
+ description: |
+ Counter of TSM compactions by level that have failed due to error.
+```
+
+::::{note}
+Some aggregation functions are not supported for certain `metric_type` values. In such a scenario, revisit your selection of `metric_type` to confirm it is indeed correct for that field. If it is valid, please create an issue in [elastic/elasticsearch](https://github.com/elastic/elasticsearch) explaining the use case.
+::::
+
+
+
+## Step 3: Update Kibana version [_step_3_update_kibana_version]
+
+Modify the `kibana.version` to at least `8.8.0` in the `manifest.yml` file of the package:
+
+```yaml
+conditions:
+ kibana.version: "^8.8.0"
+```
+
+
+## Step 4: Enable `time_series` index mode [_step_4_enable_time_series_index_mode]
+
+Add the following to the `manifest.yml` file of the data stream to enable the time series index mode:
+
+```yaml
+elasticsearch:
+ index_mode: "time_series"
+```
+
+
+## Testing [integrations-dev-tsds-testing]
+
+* If the number of dimensions is insufficient, data will be lost. Consider testing for this using the [TSDS migration test kit](https://github.com/elastic/TSDB-migration-test-kit).
+* Verify that the dashboard renders the data properly. If certain visualizations do not work, consider migrating them to [Lens](docs-content://explore-analyze/visualize/lens.md). Remember that certain aggregation functions are not supported when a field has metric type `counter`, for example `avg()`. Replace such aggregation functions with a supported one, such as `max()` or `min()`.
+
+
+## Best practices [integrations-dev-tsds-best-practices]
+
+* Use [Lens](docs-content://explore-analyze/visualize/lens.md) as the preferred visualization type.
+* Always assess how many unique values a field selected as a dimension can hold, especially if it is a numeric field. A field holding millions of unique values may not be an ideal candidate for a dimension.
+* If the dimension field values are very long (the maximum is 1024 bytes), consider transforming them to a hash representation. The [Fingerprint processor](elasticsearch://docs/reference/ingestion-tools/enrich-processor/fingerprint-processor.md) can be used for this purpose.
+* In the field mapping files, add an inline comment above each dimension field stating the reason for selecting it as a dimension.
+* As part of TSDS migration testing, you may discover other errors that are unrelated to the migration. Keep the pull request for the TSDS migration free from such changes; this helps in obtaining quick PR approval.
+
+
+## Troubleshooting [integrations-dev-tsds-troubleshooting]
+
+
+### Dropped documents [_dropped_documents]
+
+If you notice that metrics data is being dropped from an index after enabling TSDS, the [TSDS migration test kit](https://github.com/elastic/TSDB-migration-test-kit) can be used as a helpful debugging tool.
+
+
+### Conflicting field type [_conflicting_field_type]
+
+Fields with conflicting field types are not considered dimensions. Resolve the field type ambiguity before defining a field as a dimension.
+
+
+### Identification of write index [_identification_of_write_index]
+
+When mappings are modified for a data stream, an index rollover happens and a new index is created under the data stream. Even though a new index exists, data continues to go to the old index until the document timestamp matches the `index.time_series.start_time` of the newly created index.
+
+An [enhancement request](https://github.com/elastic/kibana/issues/150549) for Kibana has been created to indicate the write index. Until then, compare the `index.time_series.start_time` of the backing indices with the current time to identify the write index.
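+
+For example, the start times of the backing indices can be inspected with the index settings API (a sketch; the index pattern is illustrative):
+
+```console
+GET metrics-*/_settings/index.time_series.start_time
+```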
+
+If you find this error (for reference, see [integrations issue #7345](https://github.com/elastic/integrations/issues/7345) and [elasticsearch PR #98518](https://github.com/elastic/elasticsearch/pull/98518)):
+
+```console
+... (status=400): {"type":"illegal_argument_exception","reason":"the document timestamp [2023-08-07T00:00:00.000Z] is outside of ranges of currently writable indices [[2023-08-07T08:55:38.000Z,2023-08-07T12:55:38.000Z]]"}, dropping event!
+```
+
+Consider:
+
+1. Defining the `index.look_ahead_time` or `index.look_back_time` setting for each data stream. For example:
+
+ ```yaml
+ elasticsearch:
+ index_mode: "time_series"
+ index_template:
+ settings:
+ index.look_ahead_time: "10h"
+ ```
+
+ ::::{note}
+ Updating the package with this does not cause an automatic rollover on the data stream. You have to do that manually.
+ ::::
+
+2. Updating the `timestamp` of the document being rejected.
+3. Finding a fix to receive the document without a delay.
+
diff --git a/docs/extend/developer-workflow-fleet-UI.md b/docs/extend/developer-workflow-fleet-UI.md
new file mode 100644
index 00000000000..c4b9d12f6f7
--- /dev/null
+++ b/docs/extend/developer-workflow-fleet-UI.md
@@ -0,0 +1,105 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/integrations-developer/current/developer-workflow-fleet-UI.html
+---
+
+# Development process for Fleet UI [developer-workflow-fleet-UI]
+
+See the Kibana docs for [how to set up your dev environment](https://github.com/elastic/kibana/blob/main/CONTRIBUTING.md#setting-up-your-development-environment), [run Elasticsearch](https://github.com/elastic/kibana/blob/main/CONTRIBUTING.md#running-elasticsearch), and [start Kibana](https://github.com/elastic/kibana/blob/main/CONTRIBUTING.md#running-kibana).
+
+One common development workflow is:
+
+1. Clone Kibana repo
+
+ ```bash
+ git clone https://github.com/[YOUR_USERNAME]/kibana.git kibana
+ cd kibana
+ ```
+
+2. Install Dependencies
+
+ ```bash
+ nvm use
+ npm install -g yarn
+ ```
+
+3. Bootstrap Kibana
+
+ ```bash
+ yarn kbn bootstrap
+ ```
+
+4. Start Elasticsearch in one shell
+
+ ```bash
+ yarn es snapshot -E xpack.security.authc.api_key.enabled=true
+ ```
+
+5. Start Kibana in another shell
+
+ ```bash
+ yarn start --xpack.fleet.enabled=true --no-base-path
+ ```
+
+6. Download the Fleet Server package from [https://www.elastic.co/downloads/past-releases/#elastic-agent](https://www.elastic.co/downloads/past-releases/#elastic-agent)
+7. Untar the Fleet Server tarball and `cd` into the extracted directory
+8. Install Fleet Server (see also the alternative solution below)
+
+    ```bash
+    sudo ./elastic-agent install -f \
+      --fleet-server-es=http://elastic:changeme@localhost:9200 \
+      --fleet-server-policy={POLICY_ID}
+    ```
+
+    The default policy ID (`{POLICY_ID}` above) can be retrieved from the Fleet UI instructions in Kibana before any Fleet Server is installed. Fleet Server will start on `https://{users-machine-ip}:8220`.
+
+9. Update the Fleet settings in the top right corner of the Fleet UI to set the correct Fleet Server hosts (the IP from the previous step).
+10. After that, you can enroll as many agents as you want.
+11. Any code update in the Kibana Fleet plugin should be picked up automatically and either cause the server to restart, or be served to the browser on the next page refresh.
+
+
+## Alternative solution for fleet server [_alternative_solution_for_fleet_server]
+
+Instead of downloading the Fleet Server package and running it as a local process, you can run Fleet Server locally in a container.
+
+It can be useful to run Fleet Server in a container on your local machine in order to free up your actual "bare metal" machine to run Elastic Agent for testing purposes. Otherwise, you’ll only be able to run a single instance of Elastic Agent, dedicated to Fleet Server, on your local machine, and this can make testing integrations and policies difficult.
+
+*The following is adapted from the Fleet Server [README](https://github.com/elastic/fleet-server#running-elastic-agent-with-fleet-server-in-container)*
+
+1. Add the following configuration to your `config/kibana.yml`
+
+ ```yaml
+ server.host: 0.0.0.0
+ ```
+
+2. Append the following option to the command you use to start Elasticsearch
+
+ ```yaml
+ -E http.host=0.0.0.0
+ ```
+
+ This command should look something like this:
+
+ ```bash
+ yarn es snapshot --license trial -E xpack.security.authc.api_key.enabled=true -E path.data=/tmp/es-data -E http.host=0.0.0.0
+ ```
+
+3. Run the Fleet Server Docker container. Make sure you include a `BASE-PATH` value if your local Kibana instance is using one. `YOUR-IP` should correspond to the IP address used by your Docker network to represent the host. For Windows and Mac machines, this should be `192.168.65.2`. If you’re not sure what this IP should be, run the following to look it up:
+
+ ```bash
+ docker run -it --rm alpine nslookup host.docker.internal
+ ```
+
+ To run the Fleet Server Docker container:
+
+    ```bash
+    docker run \
+      -e KIBANA_HOST=http://{YOUR-IP}:5601/{BASE-PATH} \
+      -e KIBANA_USERNAME=elastic \
+      -e KIBANA_PASSWORD=changeme \
+      -e ELASTICSEARCH_HOST=http://{YOUR-IP}:9200 \
+      -e ELASTICSEARCH_USERNAME=elastic \
+      -e ELASTICSEARCH_PASSWORD=changeme \
+      -e KIBANA_FLEET_SETUP=1 \
+      -e FLEET_SERVER_ENABLE=1 \
+      -e FLEET_SERVER_INSECURE_HTTP=1 \
+      -p 8220:8220 \
+      docker.elastic.co/elastic-agent/elastic-agent:{VERSION}
+    ```
+
+ Ensure you provide the `-p 8220:8220` port mapping to map the Fleet Server container’s port `8220` to your local machine’s port `8220` in order for Fleet to communicate with Fleet Server.
+
+ For the latest version, use `8.0.0-SNAPSHOT`. Otherwise, you can explore the available versions at [https://www.docker.elastic.co/r/beats/elastic-agent](https://www.docker.elastic.co/r/beats/elastic-agent).
+
+    Once the Fleet Server container is running, you should be able to treat it as if it were a local process running on `http://localhost:8220` when configuring Fleet via the UI. You can then run `elastic-agent` on your local machine directly for testing purposes.
+
+
diff --git a/docs/extend/developer-workflow-import-beat.md b/docs/extend/developer-workflow-import-beat.md
new file mode 100644
index 00000000000..47bea4b5077
--- /dev/null
+++ b/docs/extend/developer-workflow-import-beat.md
@@ -0,0 +1,172 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/integrations-developer/current/developer-workflow-import-beat.html
+---
+
+# Import integration from Beats modules [developer-workflow-import-beat]
+
+The import procedure relies heavily on the *import-beats* script. If you are interested in how it works internally, feel free to review the script’s [README](https://github.com/elastic/integrations/tree/main/dev/import-beats/README.md).
+
+1. Create an issue in the [integrations](https://github.com/elastic/integrations) repository to track ongoing progress with the integration (especially manual changes).
+
+    Focus on one particular product (e.g. MySQL, ActiveMQ) you would like to integrate with. Use this issue to mention every manual change that has been applied. It will help in adjusting the `import-beats` script and reviewing the integration.
+
+2. Prepare the developer environment:
+
+ 1. Clone/refresh the following repositories:
+
+ * [https://github.com/elastic/beats](https://github.com/elastic/beats)
+ * [https://github.com/elastic/ecs](https://github.com/elastic/ecs)
+ * [https://github.com/elastic/eui](https://github.com/elastic/eui)
+ * [https://github.com/elastic/kibana](https://github.com/elastic/kibana)
+
+        Make sure you don’t have any manual changes applied, as they will be reflected in the integration.
+
+ 2. Clone/refresh the Elastic Integrations to always use the latest version of the script:
+
+ * [https://github.com/elastic/integrations](https://github.com/elastic/integrations)
+
+    3. Make sure you have the `mage` tool installed:
+
+ ```bash
+ $ go get -u -d github.com/magefile/mage
+ ```
+
+3. Use the `elastic-package stack up -v -d` command to boot up required dependencies:
+
+    1. Elasticsearch instance:
+
+ * Kibana’s dependency
+
+ 2. Kibana instance:
+
+        * used to migrate dashboards; if not available, you can skip their generation (`SKIP_KIBANA=true`)
+
+    *Hint*: There is an `elastic-package` cheat sheet available [here](https://github.com/elastic/integrations/blob/main/testing/environments/README.md).
+
+4. Create a new branch for the integration in `integrations` repository (diverge from main).
+5. Run the command `mage ImportBeats` to start the import process (note that the import script assumes the projects checked out in step 2 are at `../{project-name}`).
+
+    The outcome of running the `import-beats` script is a directory with refreshed and updated integrations.
+
+ It will take a while to finish, but the console output should be updated frequently to track the progress. The command should terminate with an exit code of 0. If it doesn’t, please open an issue.
+
+ Generated packages are stored by default in the `packages` directory. Generally, the import process updates all of the integrations, so don’t be surprised if you notice updates to multiple integrations, including the one you’re currently working on (e.g. `packages/foobarbaz`). You can either commit these changes or leave them for later.
+
+ If you want to select a subgroup of packages, set the environment variable `PACKAGES` (comma-delimited list):
+
+ ```bash
+ $ PACKAGES=aws,cisco mage ImportBeats
+ ```
+
+
+
+## Fine tune the integration [_fine_tune_the_integration]
+
+Most of the migration work is done by the `import-beats` script, but some tasks require developer interaction.
+
+If your integration is missing a screenshot or an icon, this is a good moment to add the missing resources to the Beats/Kibana repositories and re-import the integration (the import is idempotent).
+
+
+### Checklist [_checklist]
+
+The action items on the checklist are ordered to prevent the contributor from repeating work (fixing what has already been fixed, after the script overrides part of it).
+
+1. Add icon if missing.
+
+ The integration icons are presented in different places in Kibana, hence it’s better to define custom icons to make the UI easier to navigate.
+
+ As the `import-beats` script looks for icons in Kibana and EUI repositories, add an icon to the first one the same way as for tutorial resources (Kibana directory: `src/legacy/core_plugins/kibana/public/home/tutorial_resources/logos/`).
+
+2. Add screenshot if missing.
+
+    The Kibana Integration Manager shows screenshots related to the integration. Screenshots present Kibana dashboards visualizing the metric/log data.
+
+ The `import-beats` script finds references to screenshots mentioned in `_meta/docs.asciidoc` and copies image files from the Beats directories:
+
+ * `metricbeat/docs/images`
+ * `filebeat/docs/images`
+
+3. Improve/correct the spelling of product names.
+
+    The correct spelling of product names simply makes a better impression. The `import-beats` script uses the `fields.yml` file as the source of the correct spelling (`title` property), e.g. Mysql - MySQL, Nginx - NGINX, Aws - AWS.
+
+ Keep in mind that this step requires reimporting package contents.
+
+4. Write README template file for the integration.
+
+ The README template is used to render the final README file including exported fields. The template should be placed in the `package//_dev/build/docs/README.md`. If the directory doesn’t exist, please create it.
+
+    Review the MySQL docs template to see how to use template functions (e.g. `{{fields "data-stream-name"}}`). If the same data stream name is used in both metrics and logs, please add `-metrics` and `-logs` suffixes in the template. For example, `elb` is a data stream for logs and also a data stream for metrics. In the README.md template, `{{fields "elb_logs"}}` and `{{fields "elb_metrics"}}` are used to separate them.
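+
+    For instance, the README template for the `elb` example above might contain sections like the following sketch:
+
+    ```text
+    ## Metrics reference
+
+    {{fields "elb_metrics"}}
+
+    ## Logs reference
+
+    {{fields "elb_logs"}}
+    ```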
+
+5. Review fields file and exported fields in docs.
+
+ The goal of this action item is to verify if produced artifacts are correct.
+
+    The fields files (`package-fields.yml`, `fields.yml`, and `ecs.yml`) in the package were created from the original `fields.yml` files (which may contain ECS schema fields) and `fields.epr.yml` (defining other fields used in the ingest pipeline). It may happen that the original sources have a typo, a bad description, or miss a field definition. The sum of fields in all present files should contain only fields that are really used, e.g. not all existing ECS fields.
+
+    It may happen that the ingest pipeline uses ECS fields that are not mentioned in `fields.yml`. Integrations should contain these fields and also have them documented.
+
+ The fields for an integration package are divided into the following three files:
+
+    * ecs.yml: ECS-compliant fields that are used by this particular data stream.
+    * package-fields.yml: Package-level fields that are used by this particular data stream and do not exist under `.`.
+    * fields.yml: Dataset-level fields that are specific to this particular data stream and are not ECS compliant.
+
+ See the PR [https://github.com/elastic/beats/pull/17895](https://github.com/elastic/beats/pull/17895) to understand how to add them to Beats (e.g. `event.code`, `event.provider`) using the `fields.epr.yml` file.
+
+6. Metricbeat: add missing configuration options.
+
+ The `import-beats` script extracts configuration options from Metricbeat module’s `_meta` directory. It analyzes the configuration files and selects options based on enabled metricsets (not commented). If you notice that some configuration options are missing in your package’s manifest files, simply create the `config.epr.yml` file with all required options.
+
+ Sample PR: [https://github.com/elastic/beats/pull/17323](https://github.com/elastic/beats/pull/17323)
+
+7. Review *titles* and *descriptions* in manifest files.
+
+    Titles and descriptions are fields visualized in the Kibana UI. Most users will use them to see how to configure the integration with their installation of a product or how to use advanced configuration options.
+
+8. Compact configuration options (vars).
+
+    Currently, all configuration options are set by the `import-beats` script at the stream level (path: `data stream//manifest.yml`).
+
+    It may happen that some options in different data streams are simply duplicates or concern the same setting, which will always be equal (e.g. MySQL username, password). Keep in mind that two data streams may have the same configuration option but different values (e.g. `period`, `paths`), in which case they can’t be compacted.
+
+    To sum up, compacting removes the need for the user to set up the same configuration option multiple times (once per data stream).
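+
+    For example, a variable shared by all data streams of an input can be defined once at the input level in the package `manifest.yml`. A minimal sketch with illustrative names and values:
+
+    ```yaml
+    policy_templates:
+      - name: mysql
+        inputs:
+          - type: mysql/metrics
+            vars:
+              - name: hosts
+                type: text
+                title: MySQL hosts
+                multi: true
+                required: true
+                default:
+                  - tcp(127.0.0.1:3306)/
+    ```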
+
+9. Define all variable properties.
+
+ The variable properties customize visualization of configuration options in the Kibana UI. Make sure they’re defined in all manifest files.
+
+ ```yaml
+ vars:
+ - name: paths
+ required: true
+ show_user: true
+ title: Access log paths
+ description: Paths to the nginx access log file.
+ type: text
+ multi: true
+ default:
+ - /var/log/nginx/access.log*
+ ```
+
+ * **required** - option is required
+ * **show_user** - don’t hide the configuration option (collapsed menu)
+ * **title** - human readable variable name
+ * **description** - variable description (may contain some details)
+ * **type** - field type (according to the reference: text, password, bool, integer)
+    * **multi** - the field has multiple values.
+
+10. Review stream configuration.
+
+    Because the templating engine changed from the standard Golang one to [handlebars](https://handlebarsjs.com/), it may be hard to automatically convert the Filebeat input configuration (nested variables, many representations, conditions, loops). Please review the output stream configuration and identify potential bugs.
+
+11. Update docs template with sample events.
+
+    The events collected by the agent differ slightly from the original Metricbeat and Filebeat ones. Adjust the event content manually based on already migrated integrations (e.g. the [MySQL integration](https://github.com/elastic/integrations/blob/main/packages/mysql/_dev/build/docs/README.md)) or copy the events once you manage to run the whole setup with a real agent.
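+
+    The `event` placeholder renders the data stream’s `sample_event.json` in the built README. A sketch (the data stream name is illustrative):
+
+    ```text
+    An example event for `status` looks as following:
+
+    {{event "status"}}
+    ```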
+
+12. Kibana: use the `stream.dataset` field instead of `event.dataset`.
+
+    Using `stream.dataset` instead of `event.dataset` also makes queries a lot more efficient as this is a `constant_keyword` field. Make sure that dashboards in your package don’t use the `event.dataset` field. If they do, simply replace it with the more efficient one.
+
+
diff --git a/docs/extend/developer-workflow-support-old-package.md b/docs/extend/developer-workflow-support-old-package.md
new file mode 100644
index 00000000000..0355befeaba
--- /dev/null
+++ b/docs/extend/developer-workflow-support-old-package.md
@@ -0,0 +1,122 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/integrations-developer/current/developer-workflow-support-old-package.html
+---
+
+# Release a bug fix for supporting older package version [developer-workflow-support-old-package]
+
+In some cases, when we drop support for an older version of the stack and later need to add a bug fix to an old package version, we have to make some manual changes to release the bug fix to users. For example: in this [PR](https://github.com/elastic/integrations/pull/3688) (AWS package version 1.23.4), support for Kibana version 7.x was dropped and the AWS package version was bumped from 1.19.5 to 1.20.0. But we found a bug in the EC2 dashboard that needed to be fixed for Kibana version 7.x. So instead of adding a new AWS package version 1.23.5, we needed to fix it between 1.19.5 and 1.20.0.
+
+Follow these detailed steps to release a fix for a given package version:
+
+1. **Find git commit (package version) that needs to be fixed**
+
+ In the example above, the commit to be fixed is the one right before this [PR](https://github.com/elastic/integrations/pull/3688) updating package `aws`:
+
+ * Using the web:
+
+ * Look for the merge commit of the PR
+
+ * [https://github.com/elastic/integrations/commit/aa63e1f6a61d2a017e1f88af2735db129cc68e0c](https://github.com/elastic/integrations/commit/aa63e1f6a61d2a017e1f88af2735db129cc68e0c)
+ * It can be found as one of the last messages in the PR 
+ * And then show the previous commits for that changeset inside the package folder (e.g. `packages/aws`):
+ * [https://github.com/elastic/integrations/commits/aa63e1f6a61d2a017e1f88af2735db129cc68e0c/packages/aws/](https://github.com/elastic/integrations/commits/aa63e1f6a61d2a017e1f88af2735db129cc68e0c/packages/aws/) 
+
+ * Using the command line:
+
+ ```bash
+ cd packages/
+ git log --grep "#" .
+ git log -n 1 ^ .
+
+ # following the example
+ $ cd packages/aws
+ $ git log --grep "#3688"
+ commit aa63e1f6a61d2a017e1f88af2735db129cc68e0c
+ Author: Joe Reuter
+ Date: Mon Aug 8 17:14:55 2022 +0200
+
+ Inline all aws dashboards (#3688)
+
+ * inline all aws dashboards
+
+ * format
+
+ * apply the right format
+
+ * inline again
+
+ * format
+ $ git log -n 1 aa63e1f6a61d2a017e1f88af2735db129cc68e0c^ .
+ commit 8cb321075afb9b77ea965e1373a03a603d9c9796
+ Author: Mario Castro
+ Date: Thu Aug 4 16:52:06 2022 +0200
+
+ Move lightweight manifest to integration for EBS data stream (#3856)
+ ```
+
+2. Run the **integrations-backport** pipeline [https://buildkite.com/elastic/integrations-backport](https://buildkite.com/elastic/integrations-backport) to create the backport branch.
+
+    **Please pay attention:** if you just run the pipeline, it will wait for your inputs; nothing will happen without them.
+
+ :::{image} ../images/backport_input_step.png
+ :alt: waiting input step
+ :::
+
+ Pipeline’s inputs:
+
+    * **DRY_RUN** (default: "true"). If DRY_RUN is defined as "true", it will check:
+
+        * if the package is published,
+        * if the entered commit exists,
+        * if the backport branch exists.
+
+        It will also create the local branch, update it with the `.buildkite` and `.ci` folders, and remove all packages except the defined one (if set as input). This local branch will not be pushed to the upstream repository in this mode.
+
+        If DRY_RUN is defined as "false", in addition to the above, it will create a commit and push the local branch to the upstream repository [https://github.com/elastic/integrations.git](https://github.com/elastic/integrations.git). In this case, the name of the branch will be `backport-${PACKAGE_NAME}-${TRIMMED_PACKAGE_VERSION}`, for example, `backport-aws-1.19`.
+
+ * **BASE_COMMIT** (default: "") - enter the commit from the previous step (8cb321075afb9b77ea965e1373a03a603d9c9796)
+ * **PACKAGE_NAME** (default: "") - enter the package name, for example aws
+ * **PACKAGE_VERSION** (default: "") - enter the package version, for example: 1.19.7, 1.0.0-beta1
+    * **REMOVE_OTHER_PACKAGES** (default: "false") - if defined as "true", all packages from the **packages** folder, except the defined package, will be removed from the created branch.
+
+3. **Create a PR for the bug fix**
+
+    Create a new branch in your own remote (it is advised **not** to use a branch name starting with `backport-`), and apply bug fixes there. Remember to update the version in the package manifest (bump the patch version, like `1.19.`) and add a new changelog entry for this patch version (see the sketch below).
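+
+    The new changelog entry could look like the following sketch (the version, description, and PR link are illustrative):
+
+    ```yaml
+    - version: "1.19.6"
+      changes:
+        - description: Fix EC2 dashboard rendering for older Kibana versions.
+          type: bugfix
+          link: https://github.com/elastic/integrations/pull/9999
+    ```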
+
+    Once ready, open a PR, selecting as the base branch the one created above: `backport--.` (e.g. `backport-aws-1.19`).
+
+ Once this PR is merged, this new version of the package is going to be published automatically following the usual CI/CD jobs.
+
+    If another fix needs to be released for that version, there is no need to create a new backport branch. Just create a new PR to merge another branch onto the same backport branch created previously.
+
+4. **Update changelog in main**
+
+    Once the PR has been merged into the corresponding backport branch (e.g. `backport-aws-1.19`) and the package has been published, a new pull request should be created manually to update the changelog in the main branch, adding the new version published from the backport branch. Make sure to add the changelog entry following the version order.
+
+    To keep track, this new PR should also reference (relate to) the backport PR in its description.
+
+5. **Known issues and their solutions:**
+
+ 1. Missing shellinit command:
+
+ * Example of the error: [https://buildkite.com/elastic/integrations/builds/7634#018c87f4-7b0c-4d6f-8ddd-b779a9a7a019/507-512](https://buildkite.com/elastic/integrations/builds/7634#018c87f4-7b0c-4d6f-8ddd-b779a9a7a019/507-512)
+
+ `Error: could not create kibana client: undefined environment variable: ELASTIC_PACKAGE_KIBANA_HOST. If you have started the Elastic stack using the elastic-package tool, please load stack environment variables using 'eval "$(elastic-package stack shellinit)"' or set their values manually`
+
+        * **Solution**: add the `elastic-package stack shellinit` command in `.buildkite/scripts/common.sh`.
+
+ * `eval "$(elastic-package stack shellinit)"`
+
+ Example: [https://github.com/elastic/integrations/blob/0226f93e0b1493d963a297e2072f79431f6cc443/.buildkite/scripts/common.sh#L828](https://github.com/elastic/integrations/blob/0226f93e0b1493d963a297e2072f79431f6cc443/.buildkite/scripts/common.sh#L828)
+
+ 2. Not found license file:
+
+ * Example of the error: [https://buildkite.com/elastic/integrations/builds/7644#018c883c-546f-4d32-ab4a-71e919ddebf8/270-309](https://buildkite.com/elastic/integrations/builds/7644#018c883c-546f-4d32-ab4a-71e919ddebf8/270-309)
+
+ `Error: checking package failed: building package failed: copying license text file: failure while looking for license "licenses/Elastic-2.0.txt" in repository: failed to find repository license: stat /opt/buildkite-agent/builds/bk-agent-prod-gcp-1703092724145948143/elastic/integrations/licenses/Elastic-2.0.txt: no such file or directory`
+
+ * **Solution**: Remove line defining `ELASTIC_PACKAGE_REPOSITORY_LICENSE` environment variable.
+
+ * Example: [https://github.com/elastic/integrations/blob/0daff27f0e0195a483771a50d60ab28ca2830f75/.buildkite/pipeline.yml#L17](https://github.com/elastic/integrations/blob/0daff27f0e0195a483771a50d60ab28ca2830f75/.buildkite/pipeline.yml#L17)
+
+
diff --git a/docs/extend/developer-workflows.md b/docs/extend/developer-workflows.md
new file mode 100644
index 00000000000..23e63221d31
--- /dev/null
+++ b/docs/extend/developer-workflows.md
@@ -0,0 +1,14 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/integrations-developer/current/developer-workflows.html
+---
+
+# Developer workflows [developer-workflows]
+
+* [Development process for Fleet UI](/extend/developer-workflow-fleet-UI.md)
+* [Release a bug fix for supporting older package version](/extend/developer-workflow-support-old-package.md)
+* [Import integration from Beats modules](/extend/developer-workflow-import-beat.md)
+
+
+
+
diff --git a/docs/extend/docs-spec.md b/docs/extend/docs-spec.md
new file mode 100644
index 00000000000..0c9db45c45c
--- /dev/null
+++ b/docs/extend/docs-spec.md
@@ -0,0 +1,28 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/integrations-developer/current/docs-spec.html
+---
+
+# docs [docs-spec]
+
+The built integration README file.
+
+**required**
+
+Included from the package-spec repository. This will update when the spec is updated.
+
+```yaml
+spec:
+ additionalContents: false
+ contents:
+ - description: Main README file
+ type: file
+ contentMediaType: "text/markdown"
+ name: "README.md"
+ required: true
+ - description: Other README files (can be used by policy templates)
+ type: file
+ contentMediaType: "text/markdown"
+ pattern: '^.+.md'
+ required: false
+```
diff --git a/docs/extend/documentation-guidelines.md b/docs/extend/documentation-guidelines.md
new file mode 100644
index 00000000000..ea5d9cd7537
--- /dev/null
+++ b/docs/extend/documentation-guidelines.md
@@ -0,0 +1,276 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/integrations-developer/current/documentation-guidelines.html
+---
+
+# Documentation guidelines [documentation-guidelines]
+
+The goal of each integration’s documentation is to:
+
+* Help the reader understand the benefits the integration offers and how Elastic can help with their use case.
+* Inform the reader of any requirements, including system compatibility, supported versions of third-party products, permissions needed, and more.
+* Provide a comprehensive list of collected fields and the data and metric types for each. The reader can reference this information while evaluating the integration, interpreting collected data, or troubleshooting issues.
+* Set the reader up for a successful installation and setup by connecting them with any other resources they’ll need.
+
+Each integration document should contain several sections, and you should use consistent headings to make it easier for a single user to evaluate and use multiple integrations:
+
+* [Overview](#idg-docs-guidelines-overview)
+* [Datastreams](#idg-docs-guidelines-datastreams)
+* [Requirements](#idg-docs-guidelines-requirements)
+* [Setup](#idg-docs-guidelines-setup)
+* [Troubleshooting (optional)](#idg-docs-guidelines-troubleshooting)
+* [Reference](#idg-docs-guidelines-reference)
+
+
+Some considerations for writing these documentation files at `_dev/build/docs/*.md`:
+
+* These files follow the Markdown syntax and leverage the use of [documentation templates](https://github.com/elastic/elastic-package/blob/main/docs/howto/add_package_readme.md).
+* There are some available functions or placeholders (`fields`, `event`, `url`) that can be used to help you write documentation. For more detail, refer to [placeholders](https://github.com/elastic/elastic-package/blob/main/docs/howto/add_package_readme.md#placeholders).
+* The `url` placeholder should be used to add links to the [Elastic documentation guides](https://www.elastic.co/guide/index.html) in your documentation:
+
+ * The file containing all of the defined links is in the root of the directory: [`links_table.yml`](https://github.com/elastic/elastic-package/blob/main/scripts/links_table.yml)
+ * If needed, more links to Elastic documentation guides can be added into that file.
+ * Example usage:
+
+ * In the documentation files (`_dev/build/docs/*.md`), `{{ url "getting-started-observability" "Elastic guide" }}` generates a link to the Observability Getting Started guide.
+
+
+
+### Overview [idg-docs-guidelines-overview]
+
+The overview section explains what the integration is, defines the third-party product that is providing data, establishes its relationship to the larger ecosystem of Elastic products, and helps the reader understand how it can be used to solve a tangible problem.
+
+The overview should answer the following questions:
+
+* What is the integration?
+* What is the third-party product that is providing data?
+* What can you do with it?
+
+ * General description
+ * Basic example
+
+
+
+#### Template [_template]
+
+Use this template language as a starting point, replacing `` with details about the integration:
+
+```text
+The integration allows you to monitor . is .
+
+Use the integration to . Then visualize that data in Kibana, create alerts to notify you if something goes wrong, and reference when troubleshooting an issue.
+
+For example, if you wanted to