diff --git a/.apigentools-info b/.apigentools-info index b522276a89586..0d99f481ff738 100644 --- a/.apigentools-info +++ b/.apigentools-info @@ -4,13 +4,13 @@ "spec_versions": { "v1": { "apigentools_version": "1.6.6", - "regenerated": "2025-01-07 19:55:03.627982", - "spec_repo_commit": "d63fa330" + "regenerated": "2025-01-14 16:00:39.158429", + "spec_repo_commit": "0457044b" }, "v2": { "apigentools_version": "1.6.6", - "regenerated": "2025-01-07 19:55:12.024253", - "spec_repo_commit": "d63fa330" + "regenerated": "2025-01-14 16:00:47.230514", + "spec_repo_commit": "0457044b" } } } \ No newline at end of file diff --git a/assets/styles/components/_collapsible-section.scss b/assets/styles/components/_collapsible-section.scss index 3b14befcacd45..677301b3ba86f 100644 --- a/assets/styles/components/_collapsible-section.scss +++ b/assets/styles/components/_collapsible-section.scss @@ -15,6 +15,11 @@ transition: background-color 0.3s; } +/* hide marker on safari */ +.collapsible-header::-webkit-details-marker { + display: none; +} + .collapsible-header:hover { background-color: #eae2f8; } diff --git a/config/_default/menus/api.en.yaml b/config/_default/menus/api.en.yaml index 5fc3e3c8b430a..be9b9849fb41a 100644 --- a/config/_default/menus/api.en.yaml +++ b/config/_default/menus/api.en.yaml @@ -1959,9 +1959,9 @@ menu: - CreateMonitor unstable: [] order: 1 - - name: Get all monitor details - url: '#get-all-monitor-details' - identifier: monitors-get-all-monitor-details + - name: Get all monitors + url: '#get-all-monitors' + identifier: monitors-get-all-monitors parent: monitors generated: true params: @@ -4540,6 +4540,22 @@ menu: - ListApmRetentionFilters unstable: [] order: 1 + - name: Agentless Scanning + url: /api/latest/agentless-scanning/ + identifier: agentless-scanning + generated: true + - name: Get AWS Scan Options + url: '#get-aws-scan-options' + identifier: agentless-scanning-get-aws-scan-options + parent: agentless-scanning + generated: true + params: + versions: + - v2 + operationids: + - ListAwsScanOptions + unstable: [] + order: 3 - name: Audit url: /api/latest/audit/ identifier: audit diff --git a/config/_default/menus/main.en.yaml b/config/_default/menus/main.en.yaml index 5e03e27c559d8..f255656791571 100644 --- a/config/_default/menus/main.en.yaml +++ b/config/_default/menus/main.en.yaml @@ -9,30 +9,33 @@ menu: - name: Service Management identifier: service_management_heading weight: 3000000 + - name: Actions + identifier: actions_heading + weight: 4000000 - name: Infrastructure identifier: infrastructure_heading - weight: 4000000 + weight: 5000000 - name: Application Performance identifier: apm_heading - weight: 5000000 + weight: 6000000 - name: Digital Experience identifier: digital_experience_heading - weight: 6000000 + weight: 7000000 - name: Software Delivery identifier: software_delivery_heading - weight: 7000000 + weight: 8000000 - name: Security identifier: security_platform_heading - weight: 8000000 + weight: 9000000 - name: AI Observability identifier: ai_observability_heading - weight: 9000000 + weight: 10000000 - name: Log Management identifier: log_management_heading - weight: 10000000 + weight: 11000000 - name: Administration identifier: administration_heading - weight: 11000000 + weight: 12000000 - name: Getting Started identifier: getting_started url: getting_started/ @@ -1175,6 +1178,10 @@ menu: identifier: watchdog_faulty_deployment_detection parent: watchdog_top_level weight: 5 + - name: Faulty Cloud & SaaS API Detection + url: 
watchdog/faulty_cloud_saas_api_detection + parent: watchdog_top_level + weight: 6 - name: Bits AI url: bits_ai/ pre: bits-ai @@ -1693,6 +1700,11 @@ menu: parent: infrastructure_resource_catalog identifier: infrastructure_resource_catalog_schema weight: 501 + - name: Governance + url: infrastructure/resource_catalog/governance/ + parent: infrastructure_resource_catalog + identifier: infrastructure_resource_catalog_governance + weight: 502 - name: Metrics url: metrics/ identifier: metrics_top_level @@ -2440,12 +2452,34 @@ menu: parent: case_management identifier: case_management_troubleshooting weight: 3 + - name: Private Actions + url: actions/private_actions/ + pre: server + parent: actions_heading + identifier: private_actions + weight: 10000 + - name: Use Private Actions + url: actions/private_actions/use_private_actions/ + parent: private_actions + identifier: use_private_actions + weight: 101 + - name: Private Action Credentials + url: actions/private_actions/private_action_credentials/ + parent: private_actions + identifier: private_actions_creds + weight: 102 + - name: Actions Catalog + url: actions/actions_catalog/ + pre: books + parent: actions_heading + identifier: actions_catalog + weight: 20000 - name: Workflow Automation url: service_management/workflows/ pre: workflows identifier: workflows - parent: service_management_heading - weight: 60000 + parent: actions_heading + weight: 30000 - name: Build Workflows url: service_management/workflows/build/ parent: workflows @@ -2486,62 +2520,42 @@ menu: parent: workflows_actions identifier: workflows_http weight: 504 - - name: Private Actions - url: service_management/workflows/private_actions/ - parent: workflows - identifier: workflows_private_actions - weight: 6 - - name: Use Private Actions - url: service_management/workflows/private_actions/use_private_actions/ - parent: workflows_private_actions - identifier: workflows_use_private_actions - weight: 601 - - name: Private Action Credentials - url: service_management/workflows/private_actions/private_action_credentials/ - parent: workflows_private_actions - identifier: workflows_private_actions_creds - weight: 602 - name: Save and Reuse Actions url: service_management/workflows/saved_actions/ parent: workflows identifier: workflows_saved_actions - weight: 7 + weight: 6 - name: Connections url: service_management/workflows/connections/ parent: workflows identifier: workflows_connections - weight: 8 + weight: 7 - name: Datastore url: service_management/workflows/datastore/ parent: workflows identifier: workflows_datastore - weight: 9 + weight: 8 - name: Test and Debug url: service_management/workflows/test_and_debug/ parent: workflows identifier: workflows_test_and_debug - weight: 10 + weight: 9 - name: Track Workflows url: service_management/workflows/track parent: workflows identifier: workflows_track - weight: 11 + weight: 10 - name: Limits url: service_management/workflows/limits/ parent: workflows identifier: workflows_limits - weight: 12 - - name: Actions Catalog - url: service_management/workflows/actions_catalog/ - parent: workflows - identifier: workflows_actions_catalog - weight: 13 + weight: 11 - name: App Builder url: service_management/app_builder/ pre: app-builder identifier: app_builder - parent: service_management_heading - weight: 70000 + parent: actions_heading + weight: 40000 - name: Build Apps url: service_management/app_builder/build/ parent: app_builder @@ -2577,51 +2591,36 @@ menu: parent: app_builder identifier: app_builder_events weight: 6 - - name: Private 
Actions - url: service_management/app_builder/private_actions/ - parent: app_builder - identifier: app_builder_private_actions - weight: 7 - - name: Use Private Actions - url: service_management/app_builder/private_actions/use_private_actions/ - parent: app_builder_private_actions - identifier: app_builder_use_private_actions - weight: 701 - - name: Private Action Credentials - url: service_management/app_builder/private_actions/private_action_credentials/ - parent: app_builder_private_actions - identifier: app_builder_private_actions_creds - weight: 702 - name: Components url: service_management/app_builder/components/ parent: app_builder identifier: app_builder_components - weight: 8 + weight: 7 - name: Custom Charts url: service_management/app_builder/components/custom_charts/ parent: app_builder_components identifier: app_builder_custom_charts - weight: 801 + weight: 701 - name: Tables url: service_management/app_builder/components/tables/ parent: app_builder_components identifier: app_builder_tables - weight: 802 + weight: 702 - name: Datastore url: service_management/app_builder/datastore/ parent: app_builder identifier: app_builder_datastore - weight: 9 + weight: 8 - name: JavaScript Expressions url: service_management/app_builder/expressions/ parent: app_builder identifier: app_builder_expressions - weight: 10 + weight: 9 - name: Embedded Apps url: service_management/app_builder/embedded_apps/ parent: app_builder identifier: app_builder_embedded_apps - weight: 11 + weight: 10 - name: Universal Service Monitoring url: universal_service_monitoring/ pre: usm @@ -5627,20 +5626,20 @@ menu: parent: security_platform identifier: security_suppressions weight: 3 - - name: Vulnerability Pipeline - url: security/vulnerability_pipeline/ + - name: Automation Pipelines + url: security/automation_pipelines/ parent: security_platform - identifier: vulnerability_pipeline + identifier: automation_pipelines weight: 4 - name: Mute - url: security/vulnerability_pipeline/mute - parent: vulnerability_pipeline - identifier: vulnerability_pipeline_mute + url: security/automation_pipelines/mute + parent: automation_pipelines + identifier: automation_pipelines_mute weight: 10001 - name: Add to Security Inbox - url: security/vulnerability_pipeline/security_inbox - parent: vulnerability_pipeline - identifier: vulnerability_pipeline_inbox + url: security/automation_pipelines/security_inbox + parent: automation_pipelines + identifier: automation_pipelines_inbox weight: 10002 - name: Security Inbox url: security/security_inbox @@ -5779,21 +5778,26 @@ menu: parent: csm_setup identifier: csm_setup_cloudtrail weight: 103 - - name: Set Up Iac Remediation + - name: Set Up IaC Scanning + url: security/cloud_security_management/setup/iac_scanning + parent: csm_setup + identifier: csm_setup_iac_scanning + weight: 104 + - name: Set Up IaC Remediation url: security/cloud_security_management/setup/iac_remediation parent: csm_setup identifier: csm_setup_iac_remediation - weight: 104 + weight: 105 - name: Set Up without Infrastructure Monitoring url: security/cloud_security_management/setup/without_infrastructure_monitoring parent: csm_setup identifier: csm_setup_without_infrastructure_monitoring - weight: 105 + weight: 106 - name: Deploy via Cloud Integrations url: security/cloud_security_management/setup/cloud_integrations parent: csm_setup identifier: csm_setup_cloud_integrations - weight: 106 + weight: 107 - name: Threats url: security/threats/ parent: csm @@ -6843,16 +6847,11 @@ menu: parent: rum_mobile_unity identifier: 
rum_mobile_unity_mobile_vitals weight: 105 - - name: Web View Tracking - url: real_user_monitoring/mobile_and_tv_monitoring/unity/web_view_tracking - parent: rum_mobile_unity - identifier: rum_mobile_unity_web_view_tracking - weight: 106 - name: Troubleshooting url: real_user_monitoring/mobile_and_tv_monitoring/unity/troubleshooting parent: rum_mobile_unity identifier: rum_mobile_unity_troubleshooting - weight: 107 + weight: 106 - name: Platform identifier: real_user_monitoring_platform url: real_user_monitoring/platform diff --git a/content/en/account_management/audit_trail/guides/_index.md b/content/en/account_management/audit_trail/guides/_index.md index 36b67ed3fa9e9..4c51d1d7b0fdf 100644 --- a/content/en/account_management/audit_trail/guides/_index.md +++ b/content/en/account_management/audit_trail/guides/_index.md @@ -4,5 +4,6 @@ disable_toc: false --- {{< whatsnext desc="Guides:" >}} - {{< nextlink href="account_management/audit_trail/guides/track_dashboard_usage" >}}Track dashboard usage{{< /nextlink >}} + {{< nextlink href="account_management/audit_trail/guides/track_dashboard_access_and_configuration_changes" >}}Track Dashboard Access and Configuration Changes{{< /nextlink >}} + {{< nextlink href="account_management/audit_trail/guides/track_monitor_access_and_configuration_changes" >}}Track Monitor Access and Configuration Changes{{< /nextlink >}} {{< /whatsnext >}} \ No newline at end of file diff --git a/content/en/account_management/audit_trail/guides/track_dashboard_access_and_configuration_changes.md b/content/en/account_management/audit_trail/guides/track_dashboard_access_and_configuration_changes.md new file mode 100644 index 0000000000000..ac8ea3abf8eab --- /dev/null +++ b/content/en/account_management/audit_trail/guides/track_dashboard_access_and_configuration_changes.md @@ -0,0 +1,72 @@ +--- +title: Track Dashboard Access and Configuration Changes +disable_toc: false +further_reading: +- link: "account_management/audit_trail/" + tag: "Documentation" + text: "Set up Audit Trail" +aliases: +- ./track_dashboard_usage/ +--- + +## Overview + +Audit Trail provides Datadog administrators visibility into who within the organization is using Datadog and how they are using it. This guide walks you through how you can see usage information for a specific dashboard. + +## View usage information for a specific dashboard + +### Get dashboard ID + +You need the dashboard's ID to get usage information for the dashboard. + +1. Navigate to [Dashboards][1]. +1. Select your dashboard. +1. The dashboard ID is in the dashboard URL, located after `https://app.datadoghq.com/dashboard/`. For example, if the dashboard URL is `https://app.datadoghq.com/dashboard/pte-tos-7kc/escalations-report`, the dashboard ID is `pte-tos-7kc`. +1. Copy the dashboard ID. + +### View dashboard usage in Audit Trail + +To see usage information for the dashboard, use Audit Trail to search for all API `GET` requests for that dashboard ID. + +1. Navigate to [Audit Trail][2]. +2. In the search bar, enter the query: `@http.status_code:200 @http.method:GET @http.url_details.path:/api/v1/dashboard/`. Replace `` with the dashboard ID you copied earlier.
For example, if the dashboard ID is `pte-tos-7kc`, the search query looks like this: +{{< img src="account_management/audit_logs/dashboard_access_query.png" alt="Search query for all successful GET requests for the dashboard ID pte-tos-7kc" style="width:100%;" >}} +`@http.status_code:200` narrows down the results to successful requests only. +
**Note**: You can also use the facet panel on the left side of the page to formulate the search query. +3. Select the timeframe in the upper right side of the page to see the events for a specific time period. +4. You can configure the **Group into fields** section and select different visualization tools to break down and analyze the data based on your use case. For example, if you set the `group by` field to `User Email` and click **Top List** in the **Visualize as** section, you get a top list of users who accessed the dashboard. +5. See [Create a dashboard or a graph][3] if you want to put this information into a dashboard or graph. + +## View recent dashboard configuration changes + +You can use [event queries][7] in Audit Trail to see a list of dashboards that have had recent changes to their configurations. + +1. Navigate to [Audit Trail][2]. +1. In the **Search for** field, paste a query to filter for the kind of changes you want to see. Here are some common examples: + + | Audit event | Query in audit explorer | + |-----------------------------------|--------------------------------------------------------------| + | [Recently created dashboards][4] | `@evt.name:Dashboard @asset.type:dashboard @action:created` | + | [Recently modified dashboards][5] | `@evt.name:Dashboard @asset.type:dashboard @action:modified` | + | [Recently deleted dashboards][6] | `@evt.name:Dashboard @asset.type:dashboard @action:deleted` | + +1. Optionally, on the facet panel, use filters like **Asset ID** or **Asset Name** to narrow your results down to a specific dashboard. +1. For each event in the table, you can see the email address of the user who performed the last change, and a summary of what happened. + + To see additional information about a specific change, click the row in the table. Then, click the **Inspect Changes (Diff)** tab to see the changes that were made to the dashboard's configuration: + + {{< img src="account_management/audit_logs/dashboard_change_diff.png" alt="A text diff showing a new widget being added to the dashboard" style="width:100%;" >}} + +1. See [Create a dashboard or a graph][3] if you want to put this information into a dashboard or graph. + +## Further reading + +{{< partial name="whats-next/whats-next.html" >}} + +[1]: https://app.datadoghq.com/dashboard/lists +[2]: https://app.datadoghq.com/audit-trail +[3]: /account_management/audit_trail/#create-a-dashboard-or-a-graph +[4]: https://app.datadoghq.com/audit-trail?query=%40evt.name%3ADashboard%20%40asset.type%3Adashboard%20%40action%3Acreated +[5]: https://app.datadoghq.com/audit-trail?query=%40evt.name%3ADashboard%20%40asset.type%3Adashboard%20%40action%3Amodified +[6]: https://app.datadoghq.com/audit-trail?query=%40evt.name%3ADashboard%20%40asset.type%3Adashboard%20%40action%3Adeleted +[7]: /account_management/audit_trail/events \ No newline at end of file diff --git a/content/en/account_management/audit_trail/guides/track_dashboard_usage.md b/content/en/account_management/audit_trail/guides/track_dashboard_usage.md deleted file mode 100644 index ab022ab703a0e..0000000000000 --- a/content/en/account_management/audit_trail/guides/track_dashboard_usage.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: Track Dashboard Usage -disable_toc: false -further_reading: -- link: "account_management/audit_trail/" - tag: "Documentation" - text: "Set up Audit Trail" ---- - -## Overview - -Audit Trail provides Datadog administrators visibility into who within the organization is using Datadog and how they are using it. 
This guide walks you through how you can see usage information for a specific dashboard. - -## View usage information for a specific dashboard - -### Get dashboard ID - -You need the dashboard's ID to get usage information for the dashboard. - -1. Navigate to [Dashboards][1]. -1. Select your dashboard. -1. The dashboard ID is in the dashboard URL, located after `https://app.datadoghq.com/dashboard/`. For example, if the dashboard URL is `https://app.datadoghq.com/dashboard/pte-tos-7kc/escalations-report`, the dashboard ID is `pte-tos-7kc`. -1. Copy the dashboard ID. - -### View dashboard usage in Audit Trail - -To see usage information for the dashboard, use Audit Trail to search for all API `GET` requests for that dashboard ID. - -1. Navigate to [Audit Trail][2]. -2. In the search bar, enter the query: `@http.status_code:200 @http.method:GET @http.url_details.path:/api/v1/dashboard/`. Replace `` with the dashboard ID you copied earlier.
For example, if the dashboard ID is `pte-tos-7kc`, the search query looks like this: -{{< img src="account_management/audit_logs/dashboard_access_query.png" alt="Search query for all successful GET requests for a the dashboard ID pte-tos-7kc" style="width:100%;" >}} -`@http.status_code:200` narrows down the results to successful requests only. -
**Note**: You can also use the facet panel on the left side of the page to formulate the search query. -3. Select the timeframe in the upper right side of the page to see the events for a specific time period. -4. You can configure the **Group into fields** section and select different visualization tools to break down and analyze the data based on your use case. For example, if you set the `group by` field to `User Email` and click **Top List** in the **Visualize as** section, you get a top list of users who accessed the dashboard. -5. See [Create a dashboard or graph][3] if you want to put this information into a dashboard or graph. - -## Further reading - -{{< partial name="whats-next/whats-next.html" >}} - -[1]: https://app.datadoghq.com/dashboard/lists -[2]: https://app.datadoghq.com/audit-trail -[3]: /account_management/audit_trail/#create-a-dashboard-or-a-graph \ No newline at end of file diff --git a/content/en/account_management/audit_trail/guides/track_monitor_access_and_configuration_changes.md b/content/en/account_management/audit_trail/guides/track_monitor_access_and_configuration_changes.md new file mode 100644 index 0000000000000..d1cf821532b1e --- /dev/null +++ b/content/en/account_management/audit_trail/guides/track_monitor_access_and_configuration_changes.md @@ -0,0 +1,73 @@ +--- +title: Track Monitor Access and Configuration Changes +disable_toc: false +further_reading: +- link: "account_management/audit_trail/" + tag: "Documentation" + text: "Set up Audit Trail" +--- + +## Overview + +Audit Trail provides Datadog administrators visibility into who within the organization is using Datadog and how they are using it. This guide walks you through how you can see usage information for a specific monitor. + +## View usage information for a specific monitor + +### Get monitor ID + +You need the monitor's ID to get usage information for the monitor. + +1. Navigate to [Monitors][1]. +1. Select your monitor. +1. The monitor ID is in the monitor URL, located after `https://app.datadoghq.com/monitors/`. For example, if the monitor URL is `https://app.datadoghq.com/monitors/123456789`, the monitor ID is `123456789`. +1. Copy the monitor ID. + +### View monitor usage in Audit Trail + +To see usage information for the monitor, use Audit Trail to search for all API `GET` requests for that monitor ID. + +1. Navigate to [Audit Trail][2]. +2. In the search bar, enter the query: `@http.status_code:200 @http.method:GET @http.url_details.path:/api/v1/monitor/`. Replace `` with the monitor ID you copied earlier. + + For example, if the monitor ID is `123456789`, the search query should be `@http.status_code:200 @http.method:GET @http.url_details.path:/api/v1/monitor/123456789`. `@http.status_code:200` narrows down the results to successful requests only. + + **Note**: You can also use the facet panel on the left side of the page to formulate the search query. +3. Select the timeframe in the upper right side of the page to see the events for a specific time period. +4. You can configure the **Group into fields** section and select different visualization tools to break down and analyze the data based on your use case. For example, if you set the `group by` field to `User Email` and click **Top List** in the **Visualize as** section, you get a top list of users who accessed the monitor. +5. See [Create a dashboard or a graph][3] if you want to put this information into a dashboard or graph. 
+ +## View recent monitor configuration changes + +You can use [event queries][8] in Audit Trail to see a list of monitors that have had recent changes to their configurations. + +1. Navigate to [Audit Trail][2]. +1. In the **Search for** field, paste a query to filter for the kind of changes you want to see. Here are some common examples: + + | Audit event | Query in audit explorer | + |-----------------------|----------------------------------------------------------| + | [Monitor created][4] | `@evt.name:Monitor @asset.type:monitor @action:created` | + | [Monitor modified][5] | `@evt.name:Monitor @asset.type:monitor @action:modified` | + | [Monitor deleted][6] | `@evt.name:Monitor @asset.type:monitor @action:deleted` | + | [Monitor resolved][7] | `@evt.name:Monitor @asset.type:monitor @action:resolved` | + +1. Optionally, on the facet panel, use filters like **Asset ID** or **Asset Name** to narrow your results down to a specific monitor. +1. For each event in the table, you can see the email address of the user who performed the last change, and a summary of what happened. + + To see additional information about a specific change, click the row in the table. Then, click the **Inspect Changes (Diff)** tab to see the changes that were made to the monitor's configuration: + + {{< img src="account_management/audit_logs/monitor_change_diff.png" alt="A text diff showing a `check_type: api` tag being added to the monitor" style="width:100%;" >}} + +1. See [Create a dashboard or a graph][3] if you want to put this information into a dashboard or graph. + +## Further reading + +{{< partial name="whats-next/whats-next.html" >}} + +[1]: https://app.datadoghq.com/monitors/manage +[2]: https://app.datadoghq.com/audit-trail +[3]: /account_management/audit_trail/#create-a-dashboard-or-a-graph +[4]: https://app.datadoghq.com/audit-trail?query=%40evt.name%3AMonitor%20%40asset.type%3Amonitor%20%40action%3Acreated +[5]: https://app.datadoghq.com/audit-trail?query=%40evt.name%3AMonitor%20%40asset.type%3Amonitor%20%40action%3Amodified +[6]: https://app.datadoghq.com/audit-trail?query=%40evt.name%3AMonitor%20%40asset.type%3Amonitor%20%40action%3Adeleted +[7]: https://app.datadoghq.com/audit-trail?query=%40evt.name%3AMonitor%20%40asset.type%3Amonitor%20%40action%3Aresolved +[8]: /account_management/audit_trail/events/#monitor-events \ No newline at end of file diff --git a/content/en/account_management/billing/ci_visibility.md b/content/en/account_management/billing/ci_visibility.md index a51ba5b342383..a406e843e6aae 100644 --- a/content/en/account_management/billing/ci_visibility.md +++ b/content/en/account_management/billing/ci_visibility.md @@ -23,9 +23,9 @@ This guide provides a non-exhaustive list of billing considerations for [CI Visi A committer is an active Git contributor, identified by their Git author email address. For billing purposes, a committer is included if they make at least three commits in a given month. -## Charges for bot or no-reply committers with GitHub.com email addresses +## Charges for commits made by bots or actions performed in the GitHub UI -Datadog does not charge for bot or actions made in the GitHub UI. These types of committers are excluded from billing calculations. +Datadog does not charge for bot or commits resulting from actions made in the GitHub UI. These types of commits are excluded from billing calculations. 
## Excluding commits from specific people @@ -36,4 +36,4 @@ Yes, you can exclude commits from specific people by using [exclusion filters][2 {{< partial name="whats-next/whats-next.html" >}} [1]: /continuous_integration/pipelines -[2]: /continuous_integration/guides/ingestion_control \ No newline at end of file +[2]: /continuous_integration/guides/ingestion_control diff --git a/content/en/account_management/guide/access-your-support-ticket.md b/content/en/account_management/guide/access-your-support-ticket.md index a9398a0134271..f25aeb7c531b0 100644 --- a/content/en/account_management/guide/access-your-support-ticket.md +++ b/content/en/account_management/guide/access-your-support-ticket.md @@ -38,6 +38,26 @@ If you have opened at least one Datadog support ticket, follow this process to a {{< nextlink href="http://help.ddog-gov.com/" >}}US1-FED{{< /nextlink >}} {{< /whatsnext >}} +## Password requirements + +To ensure the security of your account, any password used to sign in to Datadog's Zendesk support portal must meet the following requirements: + +1. Password complexity: + - Must include at least **12 characters**. + - Must contain **uppercase and lowercase letters (A-Z)**. + - Must include at least **one number (0-9)**. + - Must include at least **one special character** (for example, `!`, `@`, `#`, or `%`). + - Must **not resemble an email address**. + - Must **not include the word "Zendesk"**. +1. Failed attempts and lockout: + - Users are allowed a maximum of **5 attempts** before the account is locked out temporarily. +1. Prohibited sequences: + - Passwords cannot include more than a specified number of consecutive letters or numbers. For instance, if the limit is set to 4, the system rejects passwords like `admin12345`. +1. Previous passwords: + - Users cannot reuse a certain number of their previously used passwords. +1. Expiration Policy: + - Passwords must be updated at least **every 90 days**, or whenever prompted by the system. + ## Troubleshooting ### Error: Refused to connect **Refused to connect** errors come from privacy settings that block third-party cookies. To solve this issue, make sure the browser allows third-party cookies from Zendesk. Find instructions on how to [Clear, enable, and manage cookies in Chrome][1] in Google Chrome Help. diff --git a/content/en/service_management/workflows/actions_catalog/_index.md b/content/en/actions/actions_catalog/_index.md similarity index 63% rename from content/en/service_management/workflows/actions_catalog/_index.md rename to content/en/actions/actions_catalog/_index.md index 4dcea0f2754b9..66acadbca0cec 100644 --- a/content/en/service_management/workflows/actions_catalog/_index.md +++ b/content/en/actions/actions_catalog/_index.md @@ -6,6 +6,7 @@ disable_sidebar: false aliases: - /workflows/actions_catalog - /service_management/service_management/workflows/actions_catalog +- /service_management/workflows/actions_catalog cascade: disable_sidebar: true type: actioncatalog @@ -14,14 +15,10 @@ cascade: --- {{< site-region region="gov" >}} -
Workflow Automation is not supported for your selected Datadog site ({{< region-param key="dd_site_name" >}}).
+
App Builder and Workflow Automation are not supported for your selected Datadog site ({{< region-param key="dd_site_name" >}}).
{{< /site-region >}} The Datadog Actions Catalog provides actions that can be performed against your infrastructure and integrations using either the Workflow Automation or App Builder products. You can orchestrate and automate your end-to-end processes by linking together actions that perform tasks in your cloud providers, SaaS tools, and Datadog accounts. See below for a list of all available actions. Click an action to see its description, inputs, outputs, and parameters. -[1]: /service_management/workflows/actions_catalog/generic_actions/#http -[2]: /service_management/workflows/actions_catalog/generic_actions/#data-transformation -[3]: /service_management/workflows/actions_catalog/logic_actions/ -[4]: /service_management/workflows/build/ diff --git a/content/en/service_management/workflows/private_actions/_index.md b/content/en/actions/private_actions/_index.md similarity index 89% rename from content/en/service_management/workflows/private_actions/_index.md rename to content/en/actions/private_actions/_index.md index a4ed82557b01a..5232a265343f4 100644 --- a/content/en/service_management/workflows/private_actions/_index.md +++ b/content/en/actions/private_actions/_index.md @@ -1,6 +1,9 @@ --- title: Private Actions Overview disable_toc: false +aliases: +- service_management/workflows/private_actions/ +- service_management/app_builder/private_actions/ further_reading: - link: "service_management/app_builder/connections" tag: "Documentation" @@ -8,18 +11,12 @@ further_reading: - link: "service_management/workflows/connections" tag: "Documentation" text: "Workflow Connections" -- link: "service_management/workflows/private_actions/use_private_actions" +- link: "actions/private_actions/use_private_actions" tag: "Documentation" - text: "Use Private Actions in Workflows" -- link: "service_management/app_builder/private_actions/use_private_actions" + text: "Use Private Actions" +- link: "actions/private_actions/private_action_credentials" tag: "Documentation" - text: "Use Private Actions in App Builder" -- link: "service_management/workflows/private_actions/private_action_credentials" - tag: "Documentation" - text: "Handling Private Action Credentials for Workflow Automation" -- link: "service_management/app_builder/private_actions/private_action_credentials" - tag: "Documentation" - text: "Handling Private Action Credentials for App Builder" + text: "Handling Private Action Credentials" --- {{< callout url="https://www.datadoghq.com/product-preview/private-actions/" btn_hidden="false" header="Join the Preview!">}} diff --git a/content/en/service_management/workflows/private_actions/private_action_credentials.md b/content/en/actions/private_actions/private_action_credentials.md similarity index 98% rename from content/en/service_management/workflows/private_actions/private_action_credentials.md rename to content/en/actions/private_actions/private_action_credentials.md index bb8467bf54bb6..74b1d10bbfa7f 100644 --- a/content/en/service_management/workflows/private_actions/private_action_credentials.md +++ b/content/en/actions/private_actions/private_action_credentials.md @@ -1,6 +1,8 @@ --- title: Handling Private Action Credentials - +aliases: +- service_management/workflows/private_actions/private_action_credentials +- service_management/app_builder/private_actions/private_action_credentials disable_toc: false --- @@ -10,7 +12,7 @@ Private Actions are in Preview. Use this form to request access today. 
## Overview -Private actions allow your Datadog workflows and apps to interact with services hosted on your private network without exposing your services to the public internet. To use private actions, you must use Docker to install a private action runner on a host in your network and pair the runner with a Datadog Connection. For more information on setting up a runner and pairing it with a connection, see [Private Actions for Workflows][1] or [Private Actions for App Builder][2]. +Private actions allow your Datadog workflows and apps to interact with services hosted on your private network without exposing your services to the public internet. To use private actions, you must use Docker to install a private action runner on a host in your network and pair the runner with a Datadog Connection. For more information on setting up a runner and pairing it with a connection, see [Private Actions][1]. Some private actions, such as Jenkins and PostgreSQL, require credentials to function. To configure credentials for a private action, you must: 1. Create a JSON file for the credential and use the JSON structure provided in [Credential files](#credential-files). @@ -240,5 +242,4 @@ In the runner's connection, specify the location of the credential file on the p {{< /tabs >}} -[1]: /service_management/workflows/private_actions -[2]: /service_management/app_builder/private_actions \ No newline at end of file +[1]: /actions/private_actions \ No newline at end of file diff --git a/content/en/service_management/workflows/private_actions/use_private_actions.md b/content/en/actions/private_actions/use_private_actions.md similarity index 96% rename from content/en/service_management/workflows/private_actions/use_private_actions.md rename to content/en/actions/private_actions/use_private_actions.md index ced54efbd24d1..5f34f16420174 100644 --- a/content/en/service_management/workflows/private_actions/use_private_actions.md +++ b/content/en/actions/private_actions/use_private_actions.md @@ -1,6 +1,9 @@ --- title: Use Private Actions disable_toc: false +aliases: +- service_management/workflows/private_actions/use_private_actions +- service_management/app_builder/private_actions/use_private_actions further_reading: - link: "service_management/app_builder/connections" tag: "Documentation" @@ -8,15 +11,12 @@ further_reading: - link: "service_management/workflows/connections" tag: "Documentation" text: "Workflow Connections" -- link: "service_management/workflows/private_actions/" +- link: "actions/private_actions/" tag: "Documentation" text: "Private Actions Overview" -- link: "service_management/workflows/private_actions/private_action_credentials" +- link: "actions/private_actions/private_action_credentials" tag: "Documentation" - text: "Handling Private Action Credentials for Workflow Automation" -- link: "service_management/app_builder/private_actions/private_action_credentials" - tag: "Documentation" - text: "Handling Private Action Credentials for App Builder" + text: "Handling Private Action Credentials" --- {{< callout url="https://www.datadoghq.com/product-preview/private-actions/" btn_hidden="false" header="Join the Preview!">}} @@ -195,9 +195,7 @@ To pair a runner to a connection: 1. From the [Workflow Automation][8] or [App Builder][9] Connections page, click **New Connection**. 1. Select the integration you want to connect with your private action runner. For a list of integrations that support private actions, see [Supported private actions](#supported-private-actions). 1. 
Add a **Connection Name** and select your runner from the **Private Action Runner** dropdown. -1. Add the paths to any required credential files. For more information on credentials, see: - - [Handling Private Action Credentials for Workflows][10]. - - [Handling Private Action Credentials for App Builder][16]. +1. Add the paths to any required credential files. For more information on credentials, see [Handling Private Action Credentials][10]. ## Use a private action @@ -417,11 +415,10 @@ To edit the allowlist for a Private Action Runner: [7]: https://app.datadoghq.com/app-builder/private-action-runners [8]: https://app.datadoghq.com/workflow/connections [9]: https://app.datadoghq.com/app-builder/connections -[10]: /service_management/workflows/private_actions/private_action_credentials +[10]: /actions/private_actions/private_action_credentials [11]: https://app.datadoghq.com/workflow/ [12]: https://app.datadoghq.com/app-builder/ [13]: /service_management/workflows/build [14]: /service_management/app_builder/build [15]: /service_management/workflows/build/#build-a-workflow-with-the-workflow-builder -[16]: /service_management/app_builder/private_actions/private_action_credentials -[17]: /service_management/workflows/private_actions/ +[17]: /actions/private_actions/ diff --git a/content/en/agent/troubleshooting/permissions.md b/content/en/agent/troubleshooting/permissions.md index 27939e3c06b2f..36b228fe36f25 100644 --- a/content/en/agent/troubleshooting/permissions.md +++ b/content/en/agent/troubleshooting/permissions.md @@ -104,6 +104,10 @@ See the following GitHub issues for more information and other potential methods * https://github.com/DataDog/dd-agent/issues/853 * https://github.com/DataDog/dd-agent/issues/2033 +## Permissions issues when running the Agent as a system daemon on MacOS + +If you installed the Agent as a system-wide launch daemon using the `DD_SYSTEMDAEMON_INSTALL` and `DD_SYSTEMDAEMON_USER_GROUP` options, verify that the user and group you used for `DD_SYSTEMDAEMON_USER_GROUP` are valid and have the correct permissions. 
+ ## Further Reading {{< partial name="whats-next/whats-next.html" >}} diff --git a/content/en/api/latest/agentless-scanning/_index.md b/content/en/api/latest/agentless-scanning/_index.md new file mode 100644 index 0000000000000..24fe396979c17 --- /dev/null +++ b/content/en/api/latest/agentless-scanning/_index.md @@ -0,0 +1,3 @@ +--- +title: Agentless Scanning +--- diff --git a/content/en/api/v1/usage-metering/examples.json b/content/en/api/v1/usage-metering/examples.json index 402c884a4d7f7..4972e0eff99c6 100644 --- a/content/en/api/v1/usage-metering/examples.json +++ b/content/en/api/v1/usage-metering/examples.json @@ -2659,6 +2659,9 @@ "rum_mobile_lite_session_count_ios_agg_sum": "integer", "rum_mobile_lite_session_count_reactnative_agg_sum": "integer", "rum_mobile_lite_session_count_roku_agg_sum": "integer", + "rum_mobile_replay_session_count_android_agg_sum": "integer", + "rum_mobile_replay_session_count_ios_agg_sum": "integer", + "rum_mobile_replay_session_count_reactnative_agg_sum": "integer", "rum_replay_session_count_agg_sum": "integer", "rum_session_count_agg_sum": "integer", "rum_total_session_count_agg_sum": "integer", @@ -2936,6 +2939,9 @@ "rum_mobile_lite_session_count_ios_sum": "integer", "rum_mobile_lite_session_count_reactnative_sum": "integer", "rum_mobile_lite_session_count_roku_sum": "integer", + "rum_mobile_replay_session_count_android_sum": "integer", + "rum_mobile_replay_session_count_ios_sum": "integer", + "rum_mobile_replay_session_count_reactnative_sum": "integer", "rum_replay_session_count_sum": "integer", "rum_session_count_sum": "integer", "rum_total_session_count_sum": "integer", @@ -2980,6 +2986,9 @@ "rum_mobile_lite_session_count_ios_sum": "integer", "rum_mobile_lite_session_count_reactnative_sum": "integer", "rum_mobile_lite_session_count_roku_sum": "integer", + "rum_mobile_replay_session_count_android_sum": "integer", + "rum_mobile_replay_session_count_ios_sum": "integer", + "rum_mobile_replay_session_count_reactnative_sum": "integer", "rum_replay_session_count_sum": "integer", "rum_session_count_sum": "integer", "rum_total_session_count_sum": "integer", @@ -3011,7 +3020,7 @@ "vuln_management_host_count_top99p_sum": "integer", "workflow_executions_usage_agg_sum": "integer" }, - "html": "
[truncated rendered HTML: a field reference table listing each monthly usage summary attribute with its type and description, for example agent_host_top99p_sum (int64) "Shows the 99th percentile of all agent hosts over all hours in the current month for all organizations.", apm_host_top99p_sum, aws_lambda_invocations_sum, custom_ts_sum, ..., continuing through live_ingested_bytes_agg_sum]

\n
\n

int64

\n

Shows the sum of all live logs bytes ingested over all hours in the current month for all organizations (data available as of December 1, 2020).

\n
\n \n
\n
\n
\n
\n
\n

- `logs_by_retention` (object): Object containing logs usage data broken down by retention period (see the sketch after this object's fields).
  - `orgs` (object): Indexed logs usage summary for each organization for each retention period with usage.
    - `usage` ([object]): Indexed logs usage summary for each organization.
      - `usage` ([object]): Indexed logs usage for each active retention for the organization.
        - `logs_indexed_logs_usage_sum` (int64): Total indexed logs for this retention period.
        - `logs_live_indexed_logs_usage_sum` (int64): Live indexed logs for this retention period.
        - `logs_rehydrated_indexed_logs_usage_sum` (int64): Rehydrated indexed logs for this retention period.
        - `retention` (string): The retention period in days or "custom" for all custom retention periods.
  - `usage` ([object]): Aggregated indexed logs usage for each retention period with usage.
    - `logs_indexed_logs_usage_agg_sum` (int64): Total indexed logs for this retention period.
    - `logs_live_indexed_logs_usage_agg_sum` (int64): Live indexed logs for this retention period.
    - `logs_rehydrated_indexed_logs_usage_agg_sum` (int64): Rehydrated indexed logs for this retention period.
    - `retention` (string): The retention period in days or "custom" for all custom retention periods.
  - `usage_by_month` (object): Object containing a summary of indexed logs usage by retention period for a single month.
    - `date` (date-time): The month for the usage.
    - `usage` ([object]): Indexed logs usage for each active retention for the month.
      - `logs_indexed_logs_usage_sum` (int64): Total indexed logs for this retention period.
      - `logs_live_indexed_logs_usage_sum` (int64): Live indexed logs for this retention period.
      - `logs_rehydrated_indexed_logs_usage_sum` (int64): Rehydrated indexed logs for this retention period.
      - `retention` (string): The retention period in days or "custom" for all custom retention periods.

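As a minimal illustration of the nesting above, the sketch below walks the aggregated per-retention entries under `logs_by_retention.usage`. The `summary` dict and its numbers are hypothetical stand-ins for a parsed response; only field names from the schema above are used.

```python
# Hypothetical parsed response fragment; the values are made up for illustration.
summary = {
    "logs_by_retention": {
        "usage": [
            {
                "retention": "15",
                "logs_indexed_logs_usage_agg_sum": 1200,
                "logs_live_indexed_logs_usage_agg_sum": 900,
                "logs_rehydrated_indexed_logs_usage_agg_sum": 300,
            },
        ],
    },
}

# Print total, live, and rehydrated indexed logs per retention period.
for entry in summary["logs_by_retention"].get("usage", []):
    print(
        entry["retention"],
        entry["logs_indexed_logs_usage_agg_sum"],
        entry["logs_live_indexed_logs_usage_agg_sum"],
        entry["logs_rehydrated_indexed_logs_usage_agg_sum"],
    )
```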
- `mobile_rum_lite_session_count_agg_sum` (int64): DEPRECATED: Shows the sum of all mobile lite sessions over all hours in the current month for all organizations (To be deprecated on October 1st, 2024).
- `mobile_rum_session_count_agg_sum` (int64): DEPRECATED: Shows the sum of all mobile RUM sessions over all hours in the current month for all organizations (To be deprecated on October 1st, 2024).
- `mobile_rum_session_count_android_agg_sum` (int64): DEPRECATED: Shows the sum of all mobile RUM sessions on Android over all hours in the current month for all organizations (To be deprecated on October 1st, 2024).
- `mobile_rum_session_count_flutter_agg_sum` (int64): DEPRECATED: Shows the sum of all mobile RUM sessions on Flutter over all hours in the current month for all organizations (To be deprecated on October 1st, 2024).
- `mobile_rum_session_count_ios_agg_sum` (int64): DEPRECATED: Shows the sum of all mobile RUM sessions on iOS over all hours in the current month for all organizations (To be deprecated on October 1st, 2024).
- `mobile_rum_session_count_reactnative_agg_sum` (int64): DEPRECATED: Shows the sum of all mobile RUM sessions on React Native over all hours in the current month for all organizations (To be deprecated on October 1st, 2024).
- `mobile_rum_session_count_roku_agg_sum` (int64): DEPRECATED: Shows the sum of all mobile RUM sessions on Roku over all hours in the current month for all organizations (To be deprecated on October 1st, 2024).
- `mobile_rum_units_agg_sum` (int64): DEPRECATED: Shows the sum of all mobile RUM units over all hours in the current month for all organizations (To be deprecated on October 1st, 2024).
- `ndm_netflow_events_agg_sum` (int64): Shows the sum of all Network Device Monitoring NetFlow events over all hours in the current month for all organizations.
- `netflow_indexed_events_count_agg_sum` (int64): DEPRECATED: Shows the sum of all Network flows indexed over all hours in the current month for all organizations (To be deprecated on October 1st, 2024).
- `npm_host_top99p_sum` (int64): Shows the 99th percentile of all distinct Cloud Network Monitoring hosts (formerly known as Network hosts) over all hours in the current month for all organizations.
- `observability_pipelines_bytes_processed_agg_sum` (int64): Sum of all observability pipelines bytes processed over all hours in the current month for all organizations.
- `oci_host_agg_sum` (int64): Shows the sum of Oracle Cloud Infrastructure hosts over all hours in the current month for all organizations.
- `oci_host_top99p_sum` (int64): Shows the 99th percentile of Oracle Cloud Infrastructure hosts over all hours in the current month for all organizations.
- `online_archive_events_count_agg_sum` (int64): Sum of all online archived events over all hours in the current month for all organizations.
- `opentelemetry_apm_host_top99p_sum` (int64): Shows the 99th percentile of APM hosts reported by the Datadog exporter for the OpenTelemetry Collector over all hours in the current month for all organizations.
- `opentelemetry_host_top99p_sum` (int64): Shows the 99th percentile of all hosts reported by the Datadog exporter for the OpenTelemetry Collector over all hours in the current month for all organizations.
- `profiling_aas_count_top99p_sum` (int64): Shows the 99th percentile of all profiled Azure app services over all hours in the current month for all organizations.
- `profiling_container_agent_count_avg` (int64): Shows the average number of profiled containers over all hours in the current month for all organizations.
- `profiling_host_count_top99p_sum` (int64): Shows the 99th percentile of all profiled hosts over all hours in the current month for all organizations.
- `rehydrated_indexed_events_agg_sum` (int64): DEPRECATED: Shows the sum of all rehydrated logs indexed over all hours in the current month for all organizations (To be deprecated on October 1st, 2024).
- `rehydrated_ingested_bytes_agg_sum` (int64): Shows the sum of all rehydrated logs bytes ingested over all hours in the current month for all organizations (data available as of December 1, 2020).
- `rum_browser_and_mobile_session_count` (int64): Shows the sum of all mobile sessions and all browser lite and legacy sessions over all hours in the current month for all organizations (To be deprecated on October 1st, 2024).
- `rum_browser_legacy_session_count_agg_sum` (int64): Shows the sum of all browser RUM legacy sessions over all hours in the current month for all organizations (To be introduced on October 1st, 2024).
- `rum_browser_lite_session_count_agg_sum` (int64): Shows the sum of all browser RUM lite sessions over all hours in the current month for all organizations (To be introduced on October 1st, 2024).
- `rum_browser_replay_session_count_agg_sum` (int64): Shows the sum of all browser RUM Session Replay counts over all hours in the current month for all organizations (To be introduced on October 1st, 2024).
- `rum_lite_session_count_agg_sum` (int64): Shows the sum of all RUM lite sessions (browser and mobile) over all hours in the current month for all organizations (To be introduced on October 1st, 2024).
- `rum_mobile_legacy_session_count_android_agg_sum` (int64): Shows the sum of all mobile RUM legacy sessions on Android over all hours in the current month for all organizations (To be introduced on October 1st, 2024).
- `rum_mobile_legacy_session_count_flutter_agg_sum` (int64): Shows the sum of all mobile RUM legacy sessions on Flutter over all hours in the current month for all organizations (To be introduced on October 1st, 2024).
- `rum_mobile_legacy_session_count_ios_agg_sum` (int64): Shows the sum of all mobile RUM legacy sessions on iOS over all hours in the current month for all organizations (To be introduced on October 1st, 2024).
- `rum_mobile_legacy_session_count_reactnative_agg_sum` (int64): Shows the sum of all mobile RUM legacy sessions on React Native over all hours in the current month for all organizations (To be introduced on October 1st, 2024).
- `rum_mobile_legacy_session_count_roku_agg_sum` (int64): Shows the sum of all mobile RUM legacy sessions on Roku over all hours in the current month for all organizations (To be introduced on October 1st, 2024).
- `rum_mobile_lite_session_count_android_agg_sum` (int64): Shows the sum of all mobile RUM lite sessions on Android over all hours in the current month for all organizations (To be introduced on October 1st, 2024).
- `rum_mobile_lite_session_count_flutter_agg_sum` (int64): Shows the sum of all mobile RUM lite sessions on Flutter over all hours in the current month for all organizations (To be introduced on October 1st, 2024).
- `rum_mobile_lite_session_count_ios_agg_sum` (int64): Shows the sum of all mobile RUM lite sessions on iOS over all hours in the current month for all organizations (To be introduced on October 1st, 2024).
- `rum_mobile_lite_session_count_reactnative_agg_sum` (int64): Shows the sum of all mobile RUM lite sessions on React Native over all hours in the current month for all organizations (To be introduced on October 1st, 2024).
- `rum_mobile_lite_session_count_roku_agg_sum` (int64): Shows the sum of all mobile RUM lite sessions on Roku over all hours in the current month for all organizations (To be introduced on October 1st, 2024).
- `rum_replay_session_count_agg_sum` (int64): Shows the sum of all RUM Session Replay counts over all hours in the current month for all organizations (To be introduced on October 1st, 2024).
- `rum_session_count_agg_sum` (int64): DEPRECATED: Shows the sum of all browser RUM lite sessions over all hours in the current month for all organizations (To be deprecated on October 1st, 2024).
- `rum_total_session_count_agg_sum` (int64): Shows the sum of RUM sessions (browser and mobile) over all hours in the current month for all organizations.
- `rum_units_agg_sum` (int64): DEPRECATED: Shows the sum of all browser and mobile RUM units over all hours in the current month for all organizations (To be deprecated on October 1st, 2024).
- `sca_fargate_count_avg_sum` (int64): Shows the average of all Software Composition Analysis Fargate tasks over all hours in the current month for all organizations.
- `sca_fargate_count_hwm_sum` (int64): Shows the sum of the high-water marks of all Software Composition Analysis Fargate tasks over all hours in the current month for all organizations.
- `sds_apm_scanned_bytes_sum` (int64): Sum of all APM bytes scanned with sensitive data scanner in the current month for all organizations.
- `sds_events_scanned_bytes_sum` (int64): Sum of all event stream events bytes scanned with sensitive data scanner in the current month for all organizations.
- `sds_logs_scanned_bytes_sum` (int64): Shows the sum of all bytes scanned of logs usage by the Sensitive Data Scanner over all hours in the current month for all organizations.
- `sds_rum_scanned_bytes_sum` (int64): Sum of all RUM bytes scanned with sensitive data scanner in the current month for all organizations.
- `sds_total_scanned_bytes_sum` (int64): Shows the sum of all bytes scanned across all usage types by the Sensitive Data Scanner over all hours in the current month for all organizations.
- `serverless_apps_azure_count_avg_sum` (int64): Sum of the average number of Serverless Apps for Azure in the current month for all organizations.
- `serverless_apps_google_count_avg_sum` (int64): Sum of the average number of Serverless Apps for Google Cloud in the current month for all organizations.
- `serverless_apps_total_count_avg_sum` (int64): Sum of the average number of Serverless Apps for Azure and Google Cloud in the current month for all organizations.
- `siem_analyzed_logs_add_on_count_agg_sum` (int64): Shows the sum of all log events analyzed by Cloud SIEM over all hours in the current month for all organizations.
- `start_date` (date-time): Shows the first date of usage in the current month for all organizations.
- `synthetics_browser_check_calls_count_agg_sum` (int64): Shows the sum of all Synthetic browser tests over all hours in the current month for all organizations.
- `synthetics_check_calls_count_agg_sum` (int64): Shows the sum of all Synthetic API tests over all hours in the current month for all organizations.
- `synthetics_mobile_test_runs_agg_sum` (int64): Shows the sum of Synthetic mobile application tests over all hours in the current month for all organizations.
- `synthetics_parallel_testing_max_slots_hwm_sum` (int64): Shows the sum of the high-water marks of used synthetics parallel testing slots over all hours in the current month for all organizations.
- `trace_search_indexed_events_count_agg_sum` (int64): Shows the sum of all Indexed Spans indexed over all hours in the current month for all organizations.
- `twol_ingested_events_bytes_agg_sum` (int64): Shows the sum of all ingested APM span bytes over all hours in the current month for all organizations.
- `universal_service_monitoring_host_top99p_sum` (int64): Shows the 99th percentile of all Universal Service Monitoring hosts over all hours in the current month for all organizations.

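The monthly aggregates above, along with the hourly `usage` breakdown documented next, are returned by the usage summary endpoint. The following is a hedged sketch of retrieving them with the `requests` library, assuming the standard `GET /api/v1/usage/summary` route, `DD-API-KEY`/`DD-APPLICATION-KEY` headers, and a `start_month` query parameter; the site URL, environment variable names, and month value are placeholders to adjust for your account.

```python
import os
import requests

# Placeholder site and credentials; adjust for your Datadog site and keys.
BASE_URL = "https://api.datadoghq.com"
headers = {
    "DD-API-KEY": os.environ["DD_API_KEY"],
    "DD-APPLICATION-KEY": os.environ["DD_APP_KEY"],
}

resp = requests.get(
    f"{BASE_URL}/api/v1/usage/summary",
    headers=headers,
    params={"start_month": "2025-01"},  # ISO-8601 month; example value
)
resp.raise_for_status()
summary = resp.json()

# Read a couple of the monthly aggregates documented above.
print("Infra hosts (p99, month):", summary.get("infra_host_top99p_sum"))
print("Ingested log bytes (month):", summary.get("ingested_events_bytes_agg_sum"))
```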
- `usage` ([object]): An array of objects regarding hourly usage.
  - `agent_host_top99p` (int64): Shows the 99th percentile of all agent hosts over all hours in the current date for all organizations.
  - `apm_azure_app_service_host_top99p` (int64): Shows the 99th percentile of all Azure app services using APM over all hours in the current date for all organizations.
  - `apm_devsecops_host_top99p` (int64): Shows the 99th percentile of all APM DevSecOps hosts over all hours in the current date for the given org.
  - `apm_fargate_count_avg` (int64): Shows the average of all APM ECS Fargate tasks over all hours in the current date for all organizations.
  - `apm_host_top99p` (int64): Shows the 99th percentile of all distinct APM hosts over all hours in the current date for all organizations.
  - `appsec_fargate_count_avg` (int64): Shows the average of all Application Security Monitoring ECS Fargate tasks over all hours in the current date for all organizations.
  - `asm_serverless_sum` (int64): Shows the sum of all Application Security Monitoring Serverless invocations over all hours in the current date for all organizations.
  - `audit_logs_lines_indexed_sum` (int64): DEPRECATED: Shows the sum of audit logs lines indexed over all hours in the current date for all organizations (To be deprecated on October 1st, 2024).
  - `audit_trail_enabled_hwm` (int64): Shows the number of organizations that had Audit Trail enabled in the current date.
  - `avg_profiled_fargate_tasks` (int64): The average total count for Fargate Container Profiler over all hours in the current date for all organizations.
  - `aws_host_top99p` (int64): Shows the 99th percentile of all AWS hosts over all hours in the current date for all organizations.
  - `aws_lambda_func_count` (int64): Shows the average of the number of functions that executed 1 or more times each hour in the current date for all organizations.
  - `aws_lambda_invocations_sum` (int64): Shows the sum of all AWS Lambda invocations over all hours in the current date for all organizations.
  - `azure_app_service_top99p` (int64): Shows the 99th percentile of all Azure app services over all hours in the current date for all organizations.
  - `billable_ingested_bytes_sum` (int64): Shows the sum of all log bytes ingested over all hours in the current date for all organizations.
  - `browser_rum_lite_session_count_sum` (int64): DEPRECATED: Shows the sum of all browser lite sessions over all hours in the current date for all organizations (To be deprecated on October 1st, 2024).
  - `browser_rum_replay_session_count_sum` (int64): Shows the sum of all browser replay sessions over all hours in the current date for all organizations (To be deprecated on October 1st, 2024).
  - `browser_rum_units_sum` (int64): DEPRECATED: Shows the sum of all browser RUM units over all hours in the current date for all organizations (To be deprecated on October 1st, 2024).
  - `ci_pipeline_indexed_spans_sum` (int64): Shows the sum of all CI pipeline indexed spans over all hours in the current month for all organizations.
  - `ci_test_indexed_spans_sum` (int64): Shows the sum of all CI test indexed spans over all hours in the current month for all organizations.
  - `ci_visibility_itr_committers_hwm` (int64): Shows the high-water mark of all CI visibility intelligent test runner committers over all hours in the current month for all organizations.
  - `ci_visibility_pipeline_committers_hwm` (int64): Shows the high-water mark of all CI visibility pipeline committers over all hours in the current month for all organizations.
  - `ci_visibility_test_committers_hwm` (int64): Shows the high-water mark of all CI visibility test committers over all hours in the current month for all organizations.
  - `cloud_cost_management_aws_host_count_avg` (int64): Host count average of Cloud Cost Management for AWS for the given date and given organization.
  - `cloud_cost_management_azure_host_count_avg` (int64): Host count average of Cloud Cost Management for Azure for the given date and given organization.
  - `cloud_cost_management_gcp_host_count_avg` (int64): Host count average of Cloud Cost Management for GCP for the given date and given organization.
  - `cloud_cost_management_host_count_avg` (int64): Host count average of Cloud Cost Management for all cloud providers for the given date and given organization.
  - `cloud_siem_events_sum` (int64): Shows the sum of all Cloud Security Information and Event Management events over all hours in the current date for the given org.
  - `code_analysis_sa_committers_hwm` (int64): Shows the high-water mark of all Static Analysis committers over all hours in the current date for the given org.
  - `code_analysis_sca_committers_hwm` (int64): Shows the high-water mark of all static Software Composition Analysis committers over all hours in the current date for the given org.
  - `code_security_host_top99p` (int64): Shows the 99th percentile of all Code Security hosts over all hours in the current date for the given org.
  - `container_avg` (int64): Shows the average of all distinct containers over all hours in the current date for all organizations.
  - `container_excl_agent_avg` (int64): Shows the average of containers without the Datadog Agent over all hours in the current date for all organizations.
  - `container_hwm` (int64): Shows the high-water mark of all distinct containers over all hours in the current date for all organizations.
  - `csm_container_enterprise_compliance_count_sum` (int64): Shows the sum of all Cloud Security Management Enterprise compliance containers over all hours in the current date for the given org.
  - `csm_container_enterprise_cws_count_sum` (int64): Shows the sum of all Cloud Security Management Enterprise Cloud Workload Security containers over all hours in the current date for the given org.
  - `csm_container_enterprise_total_count_sum` (int64): Shows the sum of all Cloud Security Management Enterprise containers over all hours in the current date for the given org.
  - `csm_host_enterprise_aas_host_count_top99p` (int64): Shows the 99th percentile of all Cloud Security Management Enterprise Azure app services hosts over all hours in the current date for the given org.
  - `csm_host_enterprise_aws_host_count_top99p` (int64): Shows the 99th percentile of all Cloud Security Management Enterprise AWS hosts over all hours in the current date for the given org.
  - `csm_host_enterprise_azure_host_count_top99p` (int64): Shows the 99th percentile of all Cloud Security Management Enterprise Azure hosts over all hours in the current date for the given org.
  - `csm_host_enterprise_compliance_host_count_top99p` (int64): Shows the 99th percentile of all Cloud Security Management Enterprise compliance hosts over all hours in the current date for the given org.
  - `csm_host_enterprise_cws_host_count_top99p` (int64): Shows the 99th percentile of all Cloud Security Management Enterprise Cloud Workload Security hosts over all hours in the current date for the given org.
  - `csm_host_enterprise_gcp_host_count_top99p` (int64): Shows the 99th percentile of all Cloud Security Management Enterprise GCP hosts over all hours in the current date for the given org.
  - `csm_host_enterprise_total_host_count_top99p` (int64): Shows the 99th percentile of all Cloud Security Management Enterprise hosts over all hours in the current date for the given org.
  - `cspm_aas_host_top99p` (int64): Shows the 99th percentile of all Cloud Security Management Pro Azure app services hosts over all hours in the current date for all organizations.
  - `cspm_aws_host_top99p` (int64): Shows the 99th percentile of all Cloud Security Management Pro AWS hosts over all hours in the current date for all organizations.
  - `cspm_azure_host_top99p` (int64): Shows the 99th percentile of all Cloud Security Management Pro Azure hosts over all hours in the current date for all organizations.
  - `cspm_container_avg` (int64): Shows the average number of Cloud Security Management Pro containers over all hours in the current date for all organizations.
  - `cspm_container_hwm` (int64): Shows the high-water mark of Cloud Security Management Pro containers over all hours in the current date for all organizations.
  - `cspm_gcp_host_top99p` (int64): Shows the 99th percentile of all Cloud Security Management Pro GCP hosts over all hours in the current date for all organizations.
  - `cspm_host_top99p` (int64): Shows the 99th percentile of all Cloud Security Management Pro hosts over all hours in the current date for all organizations.
  - `custom_ts_avg` (int64): Shows the average number of distinct custom metrics over all hours in the current date for all organizations.
  - `cws_container_count_avg` (int64): Shows the average of all distinct Cloud Workload Security containers over all hours in the current date for all organizations.
  - `cws_fargate_task_avg` (int64): Shows the average of all distinct Cloud Workload Security Fargate tasks over all hours in the current date for all organizations.
  - `cws_host_top99p` (int64): Shows the 99th percentile of all Cloud Workload Security hosts over all hours in the current date for all organizations.
  - `data_jobs_monitoring_host_hr_sum` (int64): Shows the sum of all Data Jobs Monitoring hosts over all hours in the current date for the given org.
  - `date` (date-time): The date for the usage.
  - `dbm_host_top99p` (int64): Shows the 99th percentile of all Database Monitoring hosts over all hours in the current date for all organizations.
  - `dbm_queries_count_avg` (int64): Shows the average of all normalized Database Monitoring queries over all hours in the current date for all organizations.
  - `eph_infra_host_agent_sum` (int64): Shows the sum of all ephemeral infrastructure hosts with the Datadog Agent over all hours in the current date for the given org.
  - `eph_infra_host_alibaba_sum` (int64): Shows the sum of all ephemeral infrastructure hosts on Alibaba over all hours in the current date for the given org.
  - `eph_infra_host_aws_sum` (int64): Shows the sum of all ephemeral infrastructure hosts on AWS over all hours in the current date for the given org.
  - `eph_infra_host_azure_sum` (int64): Shows the sum of all ephemeral infrastructure hosts on Azure over all hours in the current date for the given org.
  - `eph_infra_host_ent_sum` (int64): Shows the sum of all ephemeral infrastructure hosts for Enterprise over all hours in the current date for the given org.
  - `eph_infra_host_gcp_sum` (int64): Shows the sum of all ephemeral infrastructure hosts on GCP over all hours in the current date for the given org.
  - `eph_infra_host_heroku_sum` (int64): Shows the sum of all ephemeral infrastructure hosts on Heroku over all hours in the current date for the given org.
  - `eph_infra_host_only_aas_sum` (int64): Shows the sum of all ephemeral infrastructure hosts with only Azure App Services over all hours in the current date for the given org.
  - `eph_infra_host_only_vsphere_sum` (int64): Shows the sum of all ephemeral infrastructure hosts with only vSphere over all hours in the current date for the given org.
  - `eph_infra_host_opentelemetry_apm_sum` (int64): Shows the sum of all ephemeral APM hosts reported by the Datadog exporter for the OpenTelemetry Collector over all hours in the current date for the given org.
  - `eph_infra_host_opentelemetry_sum` (int64): Shows the sum of all ephemeral hosts reported by the Datadog exporter for the OpenTelemetry Collector over all hours in the current date for the given org.
  - `eph_infra_host_pro_sum` (int64): Shows the sum of all ephemeral infrastructure hosts for Pro over all hours in the current date for the given org.
  - `eph_infra_host_proplus_sum` (int64): Shows the sum of all ephemeral infrastructure hosts for Pro Plus over all hours in the current date for the given org.
  - `error_tracking_error_events_sum` (int64): Shows the sum of all Error Tracking error events over all hours in the current date for the given org.
  - `error_tracking_events_sum` (int64): Shows the sum of all Error Tracking events over all hours in the current date for the given org.
  - `error_tracking_rum_error_events_sum` (int64): Shows the sum of all Error Tracking RUM error events over all hours in the current date for the given org.
  - `fargate_container_profiler_profiling_fargate_avg` (int64): The average number of Profiling Fargate tasks over all hours in the current date for all organizations.
  - `fargate_container_profiler_profiling_fargate_eks_avg` (int64): The average number of Profiling Fargate Elastic Kubernetes Service tasks over all hours in the current date for all organizations.
  - `fargate_tasks_count_avg` (int64): Shows the average of all Fargate tasks over all hours in the current date for all organizations.
  - `fargate_tasks_count_hwm` (int64): Shows the high-water mark of all Fargate tasks over all hours in the current date for all organizations.
  - `flex_logs_compute_large_avg` (int64): Shows the average number of Flex Logs Compute Large Instances over all hours in the current date for the given org.
  - `flex_logs_compute_medium_avg` (int64): Shows the average number of Flex Logs Compute Medium Instances over all hours in the current date for the given org.
  - `flex_logs_compute_small_avg` (int64): Shows the average number of Flex Logs Compute Small Instances over all hours in the current date for the given org.
  - `flex_logs_compute_xsmall_avg` (int64): Shows the average number of Flex Logs Compute Extra Small Instances over all hours in the current date for the given org.
  - `flex_logs_starter_avg` (int64): Shows the average number of Flex Logs Starter Instances over all hours in the current date for the given org.
  - `flex_logs_starter_storage_index_avg` (int64): Shows the average number of Flex Logs Starter Storage Index Instances over all hours in the current date for the given org.
  - `flex_logs_starter_storage_retention_adjustment_avg` (int64): Shows the average number of Flex Logs Starter Storage Retention Adjustment Instances over all hours in the current date for the given org.
  - `flex_stored_logs_avg` (int64): Shows the average of all Flex Stored Logs over all hours in the current date for the given org.
  - `forwarding_events_bytes_sum` (int64): Shows the sum of all log bytes forwarded over all hours in the current date for all organizations.
  - `gcp_host_top99p` (int64): Shows the 99th percentile of all GCP hosts over all hours in the current date for all organizations.
  - `heroku_host_top99p` (int64): Shows the 99th percentile of all Heroku dynos over all hours in the current date for all organizations.
  - `incident_management_monthly_active_users_hwm` (int64): Shows the high-water mark of incident management monthly active users over all hours in the current date for all organizations.
  - `indexed_events_count_sum` (int64): Shows the sum of all log events indexed over all hours in the current date for all organizations.
  - `infra_host_top99p` (int64): Shows the 99th percentile of all distinct infrastructure hosts over all hours in the current date for all organizations.
  - `ingested_events_bytes_sum` (int64): Shows the sum of all log bytes ingested over all hours in the current date for all organizations.
  - `iot_device_sum` (int64): Shows the sum of all IoT devices over all hours in the current date for all organizations.
  - `iot_device_top99p` (int64): Shows the 99th percentile of all IoT devices over all hours in the current date for all organizations.
  - `mobile_rum_lite_session_count_sum` (int64): DEPRECATED: Shows the sum of all mobile lite sessions over all hours in the current date for all organizations (To be deprecated on October 1st, 2024).
  - `mobile_rum_session_count_android_sum` (int64): DEPRECATED: Shows the sum of all mobile RUM sessions on Android over all hours in the current date for all organizations (To be deprecated on October 1st, 2024).
  - `mobile_rum_session_count_flutter_sum` (int64): DEPRECATED: Shows the sum of all mobile RUM sessions on Flutter over all hours in the current date for all organizations (To be deprecated on October 1st, 2024).
  - `mobile_rum_session_count_ios_sum` (int64): DEPRECATED: Shows the sum of all mobile RUM sessions on iOS over all hours in the current date for all organizations (To be deprecated on October 1st, 2024).
  - `mobile_rum_session_count_reactnative_sum` (int64): DEPRECATED: Shows the sum of all mobile RUM sessions on React Native over all hours in the current date for all organizations (To be deprecated on October 1st, 2024).
  - `mobile_rum_session_count_roku_sum` (int64): DEPRECATED: Shows the sum of all mobile RUM sessions on Roku over all hours in the current date for all organizations (To be deprecated on October 1st, 2024).
  - `mobile_rum_session_count_sum` (int64): DEPRECATED: Shows the sum of all mobile RUM sessions over all hours in the current date for all organizations (To be deprecated on October 1st, 2024).
  - `mobile_rum_units_sum` (int64): DEPRECATED: Shows the sum of all mobile RUM units over all hours in the current date for all organizations (To be deprecated on October 1st, 2024).
  - `ndm_netflow_events_sum` (int64): Shows the sum of all Network Device Monitoring NetFlow events over all hours in the current date for the given org.
  - `netflow_indexed_events_count_sum` (int64): DEPRECATED: Shows the sum of all Network flows indexed over all hours in the current date for all organizations (To be deprecated on October 1st, 2024).
  - `npm_host_top99p` (int64): Shows the 99th percentile of all distinct Cloud Network Monitoring hosts (formerly known as Network hosts) over all hours in the current date for all organizations.
  - `observability_pipelines_bytes_processed_sum` (int64): Sum of all observability pipelines bytes processed over all hours in the current date for the given org.
  - `oci_host_sum` (int64): Shows the sum of all Oracle Cloud Infrastructure hosts over all hours in the current date for the given org.
  - `oci_host_top99p` (int64): Shows the 99th percentile of all Oracle Cloud Infrastructure hosts over all hours in the current date for the given org.
  - `online_archive_events_count_sum` (int64): Sum of all online archived events over all hours in the current date for all organizations.
  - `opentelemetry_apm_host_top99p` (int64): Shows the 99th percentile of APM hosts reported by the Datadog exporter for the OpenTelemetry Collector over all hours in the current date for all organizations.
  - `opentelemetry_host_top99p` (int64): Shows the 99th percentile of all hosts reported by the Datadog exporter for the OpenTelemetry Collector over all hours in the current date for all organizations.

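Each entry in `usage` also carries an `orgs` array, documented next, that breaks the same daily metrics down per organization. The sketch below is a self-contained, hypothetical example of rolling those per-org values up; the `summary` dict and its values are stand-ins for a parsed response, and only field names from this schema (`usage`, `orgs`, `account_name`, `apm_host_top99p`) are used.

```python
from collections import defaultdict

# Hypothetical parsed response; only the keys used below are shown.
summary = {
    "usage": [
        {"date": "2025-01-01T00:00:00+00:00",
         "orgs": [{"account_name": "main-org", "apm_host_top99p": 42}]},
        {"date": "2025-01-02T00:00:00+00:00",
         "orgs": [{"account_name": "main-org", "apm_host_top99p": 45}]},
    ],
}

# Peak daily APM host count (99th percentile) per organization for the month.
peak_by_org = defaultdict(int)
for day in summary.get("usage", []):
    for org in day.get("orgs", []):
        name = org.get("account_name", "unknown")
        peak_by_org[name] = max(peak_by_org[name], org.get("apm_host_top99p") or 0)

for name, peak in peak_by_org.items():
    print(name, peak)
```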
  - `orgs` ([object]): Organizations associated with a user.
    - `account_name` (string): The account name.
    - `account_public_id` (string): The account public id.
    - `agent_host_top99p` (int64): Shows the 99th percentile of all agent hosts over all hours in the current date for the given org.
    - `apm_azure_app_service_host_top99p` (int64): Shows the 99th percentile of all Azure app services using APM over all hours in the current date for the given org.
    - `apm_devsecops_host_top99p` (int64): Shows the 99th percentile of all APM DevSecOps hosts over all hours in the current date for the given org.
    - `apm_fargate_count_avg` (int64): Shows the average of all APM ECS Fargate tasks over all hours in the current month for the given org.
    - `apm_host_top99p` (int64): Shows the 99th percentile of all distinct APM hosts over all hours in the current date for the given org.
    - `appsec_fargate_count_avg` (int64): Shows the average of all Application Security Monitoring ECS Fargate tasks over all hours in the current month for the given org.
    - `asm_serverless_sum` (int64): Shows the sum of all Application Security Monitoring Serverless invocations over all hours in the current month for the given org.
    - `audit_logs_lines_indexed_sum` (int64): DEPRECATED: Shows the sum of all audit logs lines indexed over all hours in the current date for the given org (To be deprecated on October 1st, 2024).
    - `audit_trail_enabled_hwm` (int64): Shows whether Audit Trail is enabled for the current date for the given org.
    - `avg_profiled_fargate_tasks` (int64): The average total count for Fargate Container Profiler over all hours in the current month for the given org.
    - `aws_host_top99p` (int64): Shows the 99th percentile of all AWS hosts over all hours in the current date for the given org.
    - `aws_lambda_func_count` (int64): Shows the average of the number of functions that executed 1 or more times each hour in the current date for the given org.
    - `aws_lambda_invocations_sum` (int64): Shows the sum of all AWS Lambda invocations over all hours in the current date for the given org.
    - `azure_app_service_top99p` (int64): Shows the 99th percentile of all Azure app services over all hours in the current date for the given org.
    - `billable_ingested_bytes_sum` (int64): Shows the sum of all log bytes ingested over all hours in the current date for the given org.
    - `browser_rum_lite_session_count_sum` (int64): DEPRECATED: Shows the sum of all browser lite sessions over all hours in the current date for the given org (To be deprecated on October 1st, 2024).
    - `browser_rum_replay_session_count_sum` (int64): Shows the sum of all browser replay sessions over all hours in the current date for the given org (To be deprecated on October 1st, 2024).
    - `browser_rum_units_sum` (int64): DEPRECATED: Shows the sum of all browser RUM units over all hours in the current date for the given org (To be deprecated on October 1st, 2024).
    - `ci_pipeline_indexed_spans_sum` (int64): Shows the sum of all CI pipeline indexed spans over all hours in the current date for the given org.
    - `ci_test_indexed_spans_sum` (int64): Shows the sum of all CI test indexed spans over all hours in the current date for the given org.
    - `ci_visibility_itr_committers_hwm` (int64): Shows the high-water mark of all CI visibility intelligent test runner committers over all hours in the current date for the given org.
    - `ci_visibility_pipeline_committers_hwm` (int64): Shows the high-water mark of all CI visibility pipeline committers over all hours in the current date for the given org.
    - `ci_visibility_test_committers_hwm` (int64): Shows the high-water mark of all CI visibility test committers over all hours in the current date for the given org.
    - `cloud_cost_management_aws_host_count_avg` (int64): Host count average of Cloud Cost Management for AWS for the given date and given org.
    - `cloud_cost_management_azure_host_count_avg` (int64): Host count average of Cloud Cost Management for Azure for the given date and given org.
    - `cloud_cost_management_gcp_host_count_avg` (int64): Host count average of Cloud Cost Management for GCP for the given date and given org.
    - `cloud_cost_management_host_count_avg` (int64): Host count average of Cloud Cost Management for all cloud providers for the given date and given org.
    - `cloud_siem_events_sum` (int64): Shows the sum of all Cloud Security Information and Event Management events over all hours in the current date for the given org.
    - `code_analysis_sa_committers_hwm` (int64): Shows the high-water mark of all Static Analysis committers over all hours in the current date for the given org.
    - `code_analysis_sca_committers_hwm` (int64): Shows the high-water mark of all static Software Composition Analysis committers over all hours in the current date for the given org.
    - `code_security_host_top99p` (int64): Shows the 99th percentile of all Code Security hosts over all hours in the current date for the given org.
    - `container_avg` (int64): Shows the average of all distinct containers over all hours in the current date for the given org.
    - `container_excl_agent_avg` (int64): Shows the average of containers without the Datadog Agent over all hours in the current date for the given org.
    - `container_hwm` (int64): Shows the high-water mark of all distinct containers over all hours in the current date for the given org.
    - `csm_container_enterprise_compliance_count_sum` (int64): Shows the sum of all Cloud Security Management Enterprise compliance containers over all hours in the current date for the given org.
    - `csm_container_enterprise_cws_count_sum` (int64): Shows the sum of all Cloud Security Management Enterprise Cloud Workload Security containers over all hours in the current date for the given org.
    - `csm_container_enterprise_total_count_sum` (int64): Shows the sum of all Cloud Security Management Enterprise containers over all hours in the current date for the given org.
    - `csm_host_enterprise_aas_host_count_top99p` (int64): Shows the 99th percentile of all Cloud Security Management Enterprise Azure app services hosts over all hours in the current date for the given org.
    - `csm_host_enterprise_aws_host_count_top99p` (int64): Shows the 99th percentile of all Cloud Security Management Enterprise AWS hosts over all hours in the current date for the given org.
    - `csm_host_enterprise_azure_host_count_top99p` (int64): Shows the 99th percentile of all Cloud Security Management Enterprise Azure hosts over all hours in the current date for the given org.
    - `csm_host_enterprise_compliance_host_count_top99p` (int64): Shows the 99th percentile of all Cloud Security Management Enterprise compliance hosts over all hours in the current date for the given org.
    - `csm_host_enterprise_cws_host_count_top99p` (int64): Shows the 99th percentile of all Cloud Security Management Enterprise Cloud Workload Security hosts over all hours in the current date for the given org.
    - `csm_host_enterprise_gcp_host_count_top99p` (int64): Shows the 99th percentile of all Cloud Security Management Enterprise GCP hosts over all hours in the current date for the given org.
    - `csm_host_enterprise_total_host_count_top99p` (int64): Shows the 99th percentile of all Cloud Security Management Enterprise hosts over all hours in the current date for the given org.
    - `cspm_aas_host_top99p` (int64): Shows the 99th percentile of all Cloud Security Management Pro Azure app services hosts over all hours in the current date for the given org.
    - `cspm_aws_host_top99p` (int64): Shows the 99th percentile of all Cloud Security Management Pro AWS hosts over all hours in the current date for the given org.
    - `cspm_azure_host_top99p` (int64): Shows the 99th percentile of all Cloud Security Management Pro Azure hosts over all hours in the current date for the given org.
    - `cspm_container_avg` (int64): Shows the average number of Cloud Security Management Pro containers over all hours in the current date for the given org.
    - `cspm_container_hwm` (int64): Shows the high-water mark of Cloud Security Management Pro containers over all hours in the current date for the given org.
    - `cspm_gcp_host_top99p` (int64): Shows the 99th percentile of all Cloud Security Management Pro GCP hosts over all hours in the current date for the given org.
    - `cspm_host_top99p` (int64): Shows the 99th percentile of all Cloud Security Management Pro hosts over all hours in the current date for the given org.
    - `custom_historical_ts_avg` (int64): Shows the average number of distinct historical custom metrics over all hours in the current date for the given org.
    - `custom_live_ts_avg` (int64): Shows the average number of distinct live custom metrics over all hours in the current date for the given org.
    - `custom_ts_avg` (int64): Shows the average number of distinct custom metrics over all hours in the current date for the given org.
    - `cws_container_count_avg` (int64): Shows the average of all distinct Cloud Workload Security containers over all hours in the current date for the given org.
    - `cws_fargate_task_avg` (int64): Shows the average of all distinct Cloud Workload Security Fargate tasks over all hours in the current date for the given org.
    - `cws_host_top99p` (int64): Shows the 99th percentile of all Cloud Workload Security hosts over all hours in the current date for the given org.
    - `data_jobs_monitoring_host_hr_sum` (int64): Shows the sum of all Data Jobs Monitoring hosts over all hours in the current date for the given org.
    - `dbm_host_top99p_sum` (int64): Shows the 99th percentile of all Database Monitoring hosts over all hours in the current month for the given org.
    - `dbm_queries_avg_sum` (int64): Shows the average of all distinct Database Monitoring normalized queries over all hours in the current month for the given org.
    - `eph_infra_host_agent_sum` (int64): Shows the sum of all ephemeral infrastructure hosts with the Datadog Agent over all hours in the current date for the given org.
    - `eph_infra_host_alibaba_sum` (int64): Shows the sum of all ephemeral infrastructure hosts on Alibaba over all hours in the current date for the given org.
    - `eph_infra_host_aws_sum` (int64): Shows the sum of all ephemeral infrastructure hosts on AWS over all hours in the current date for the given org.
    - `eph_infra_host_azure_sum` (int64): Shows the sum of all ephemeral infrastructure hosts on Azure over all hours in the current date for the given org.
    - `eph_infra_host_ent_sum` (int64): Shows the sum of all ephemeral infrastructure hosts for Enterprise over all hours in the current date for the given org.
    - `eph_infra_host_gcp_sum` (int64): Shows the sum of all ephemeral infrastructure hosts on GCP over all hours in the current date for the given org.
    - `eph_infra_host_heroku_sum` (int64): Shows the sum of all ephemeral infrastructure hosts on Heroku over all hours in the current date for the given org.
    - `eph_infra_host_only_aas_sum` (int64): Shows the sum of all ephemeral infrastructure hosts with only Azure App Services over all hours in the current date for the given org.
    - `eph_infra_host_only_vsphere_sum` (int64): Shows the sum of all ephemeral infrastructure hosts with only vSphere over all hours in the current date for the given org.
    - `eph_infra_host_opentelemetry_apm_sum` (int64): Shows the sum of all ephemeral APM hosts reported by the Datadog exporter for the OpenTelemetry Collector over all hours in the current date for the given org.
    - `eph_infra_host_opentelemetry_sum` (int64): Shows the sum of all ephemeral hosts reported by the Datadog exporter for the OpenTelemetry Collector over all hours in the current date for the given org.
    - `eph_infra_host_pro_sum` (int64): Shows the sum of all ephemeral infrastructure hosts for Pro over all hours in the current date for the given org.
    - `eph_infra_host_proplus_sum` (int64): Shows the sum of all ephemeral infrastructure hosts for Pro Plus over all hours in the current date for the given org.
    - `error_tracking_error_events_sum` (int64): Shows the sum of all Error Tracking error events over all hours in the current date for the given org.
    - `error_tracking_events_sum` (int64): Shows the sum of all Error Tracking events over all hours in the current date for the given org.
    - `error_tracking_rum_error_events_sum` (int64): Shows the sum of all Error Tracking RUM error events over all hours in the current date for the given org.
    - `fargate_container_profiler_profiling_fargate_avg` (int64): The average number of Profiling Fargate tasks over all hours in the current month for the given org.
    - `fargate_container_profiler_profiling_fargate_eks_avg` (int64): The average number of Profiling Fargate Elastic Kubernetes Service tasks over all hours in the current month for the given org.
    - `fargate_tasks_count_avg` (int64): The average task count for Fargate.
    - `fargate_tasks_count_hwm` (int64): Shows the high-water mark of all Fargate tasks over all hours in the current date for the given org.
    - `flex_logs_compute_large_avg` (int64): Shows the average number of Flex Logs Compute Large Instances over all hours in the current date for the given org.
    - `flex_logs_compute_medium_avg` (int64): Shows the average number of Flex Logs Compute Medium Instances over all hours in the current date for the given org.
    - `flex_logs_compute_small_avg` (int64): Shows the average number of Flex Logs Compute Small Instances over all hours in the current date for the given org.
    - `flex_logs_compute_xsmall_avg` (int64): Shows the average number of Flex Logs Compute Extra Small Instances over all hours in the current date for the given org.
    - `flex_logs_starter_avg` (int64): Shows the average number of Flex Logs Starter Instances over all hours in the current date for the given org.
    - `flex_logs_starter_storage_index_avg` (int64): Shows the average number of Flex Logs Starter Storage Index Instances over all hours in the current date for the given org.
    - `flex_logs_starter_storage_retention_adjustment_avg` (int64): Shows the average number of Flex Logs Starter Storage Retention Adjustment Instances over all hours in the current date for the given org.
    - `flex_stored_logs_avg` (int64): Shows the average of all Flex Stored Logs over all hours in the current date for the given org.
    - `forwarding_events_bytes_sum` (int64): Shows the sum of all log bytes forwarded over all hours in the current date for the given org.
    - `gcp_host_top99p` (int64): Shows the 99th percentile of all GCP hosts over all hours in the current date for the given org.
    - `heroku_host_top99p` (int64): Shows the 99th percentile of all Heroku dynos over all hours in the current date for the given org.
    - `id` (string): The organization id.
    - `incident_management_monthly_active_users_hwm` (int64): Shows the high-water mark of incident management monthly active users over all hours in the current date for the given org.
    - `indexed_events_count_sum` (int64): DEPRECATED: Shows the sum of all log events indexed over all hours in the current date for the given org (To be deprecated on October 1st, 2024).
    - `infra_host_top99p` (int64): Shows the 99th percentile of all distinct infrastructure hosts over all hours in the current date for the given org.
    - `ingested_events_bytes_sum` (int64): Shows the sum of all log bytes ingested over all hours in the current date for the given org.
    - `iot_device_agg_sum` (int64): Shows the sum of all IoT devices over all hours in the current date for the given org.
    - `iot_device_top99p_sum` (int64): Shows the 99th percentile of all IoT devices over all hours in the current date for the given org.
    - `mobile_rum_lite_session_count_sum` (int64): DEPRECATED: Shows the sum of all mobile lite sessions over all hours in the current date for the given org (To be deprecated on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

mobile_rum_session_count_android_sum

\n
\n

int64

\n

DEPRECATED: Shows the sum of all mobile RUM sessions on Android over all hours in the current date for the given org (To be deprecated on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

mobile_rum_session_count_flutter_sum

\n
\n

int64

\n

DEPRECATED: Shows the sum of all mobile RUM sessions on Flutter over all hours in the current date for the given org (To be deprecated on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

mobile_rum_session_count_ios_sum

\n
\n

int64

\n

DEPRECATED: Shows the sum of all mobile RUM sessions on iOS over all hours in the current date for the given org (To be deprecated on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

mobile_rum_session_count_reactnative_sum

\n
\n

int64

\n

DEPRECATED: Shows the sum of all mobile RUM sessions on React Native over all hours in the current date for the given org (To be deprecated on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

mobile_rum_session_count_roku_sum

\n
\n

int64

\n

DEPRECATED: Shows the sum of all mobile RUM sessions on Roku over all hours in the current date for the given org (To be deprecated on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

mobile_rum_session_count_sum

\n
\n

int64

\n

DEPRECATED: Shows the sum of all mobile RUM sessions over all hours in the current date for the given org (To be deprecated on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

mobile_rum_units_sum

\n
\n

int64

\n

DEPRECATED: Shows the sum of all mobile RUM units over all hours in the current date for the given org (To be deprecated on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

name

\n
\n

string

\n

The organization name.

\n
\n \n
\n
\n
\n
\n
\n

ndm_netflow_events_sum

\n
\n

int64

\n

Shows the sum of all Network Device Monitoring NetFlow events over all hours in the current date for the given org.

\n
\n \n
\n
\n
\n
\n
\n

netflow_indexed_events_count_sum

\n
\n

int64

\n

DEPRECATED: Shows the sum of all Network flows indexed over all hours in the current date for the given org (To be deprecated on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

npm_host_top99p

\n
\n

int64

\n

Shows the 99th percentile of all distinct Cloud Network Monitoring hosts (formerly known as Network hosts) over all hours in the current date for the given org.

\n
\n \n
\n
\n
\n
\n
\n

observability_pipelines_bytes_processed_sum

\n
\n

int64

\n

Sum of all observability pipelines bytes processed over all hours in the current date for the given org.

\n
\n \n
\n
\n
\n
\n
\n

oci_host_sum

\n
\n

int64

\n

Shows the sum of all Oracle Cloud Infrastructure hosts over all hours in the current date for the given org.

\n
\n \n
\n
\n
\n
\n
\n

oci_host_top99p

\n
\n

int64

\n

Shows the 99th percentile of all Oracle Cloud Infrastructure hosts over all hours in the current date for the given org.

\n
\n \n
\n
\n
\n
\n
\n

online_archive_events_count_sum

\n
\n

int64

\n

Sum of all online archived events over all hours in the current date for the given org.

\n
\n \n
\n
\n
\n
\n
\n

opentelemetry_apm_host_top99p

\n
\n

int64

\n

Shows the 99th percentile of APM hosts reported by the Datadog exporter for the OpenTelemetry Collector over all hours in the current date for the given org.

\n
\n \n
\n
\n
\n
\n
\n

opentelemetry_host_top99p

\n
\n

int64

\n

Shows the 99th percentile of all hosts reported by the Datadog exporter for the OpenTelemetry Collector over all hours in the current date for the given org.

\n
\n \n
\n
\n
\n
\n
\n

profiling_aas_count_top99p

\n
\n

int64

\n

Shows the 99th percentile of all profiled Azure app services over all hours in the current date for all organizations.

\n
\n \n
\n
\n
\n
\n
\n

profiling_host_top99p

\n
\n

int64

\n

Shows the 99th percentile of all profiled hosts over all hours in the current date for the given org.

\n
\n \n
\n
\n
\n
\n
\n

public_id

\n
\n

string

\n

The organization public id.

\n
\n \n
\n
\n
\n
\n
\n

region

\n
\n

string

\n

The region of the organization.

\n
\n \n
\n
\n
\n
\n
\n

rum_browser_and_mobile_session_count

\n
\n

int64

\n

Shows the sum of all mobile sessions and all browser lite and legacy sessions over all hours in the current date for the given org (To be deprecated on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

rum_browser_legacy_session_count_sum

\n
\n

int64

\n

Shows the sum of all browser RUM legacy sessions over all hours in the current date for the given org (To be introduced on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

rum_browser_lite_session_count_sum

\n
\n

int64

\n

Shows the sum of all browser RUM lite sessions over all hours in the current date for the given org (To be introduced on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

rum_browser_replay_session_count_sum

\n
\n

int64

\n

Shows the sum of all browser RUM Session Replay counts over all hours in the current date for the given org (To be introduced on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

rum_lite_session_count_sum

\n
\n

int64

\n

Shows the sum of all RUM lite sessions (browser and mobile) over all hours in the current date for the given org (To be introduced on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

rum_mobile_legacy_session_count_android_sum

\n
\n

int64

\n

Shows the sum of all mobile RUM legacy sessions on Android over all hours in the current date for the given org (To be introduced on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

rum_mobile_legacy_session_count_flutter_sum

\n
\n

int64

\n

Shows the sum of all mobile RUM legacy sessions on Flutter over all hours in the current date for the given org (To be introduced on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

rum_mobile_legacy_session_count_ios_sum

\n
\n

int64

\n

Shows the sum of all mobile RUM legacy sessions on iOS over all hours in the current date for the given org (To be introduced on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

rum_mobile_legacy_session_count_reactnative_sum

\n
\n

int64

\n

Shows the sum of all mobile RUM legacy sessions on React Native over all hours in the current date for the given org (To be introduced on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

rum_mobile_legacy_session_count_roku_sum

\n
\n

int64

\n

Shows the sum of all mobile RUM legacy sessions on Roku over all hours in the current date for the given org (To be introduced on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

rum_mobile_lite_session_count_android_sum

\n
\n

int64

\n

Shows the sum of all mobile RUM lite sessions on Android over all hours in the current date for the given org (To be introduced on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

rum_mobile_lite_session_count_flutter_sum

\n
\n

int64

\n

Shows the sum of all mobile RUM lite sessions on Flutter over all hours in the current date for the given org (To be introduced on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

rum_mobile_lite_session_count_ios_sum

\n
\n

int64

\n

Shows the sum of all mobile RUM lite sessions on iOS over all hours in the current date for the given org (To be introduced on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

rum_mobile_lite_session_count_reactnative_sum

\n
\n

int64

\n

Shows the sum of all mobile RUM lite sessions on React Native over all hours in the current date for the given org (To be introduced on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

rum_mobile_lite_session_count_roku_sum

\n
\n

int64

\n

Shows the sum of all mobile RUM lite sessions on Roku over all hours in the current date for the given org (To be introduced on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

rum_replay_session_count_sum

\n
\n

int64

\n

Shows the sum of all RUM Session Replay counts over all hours in the current date for the given org (To be introduced on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

rum_session_count_sum

\n
\n

int64

\n

DEPRECATED: Shows the sum of all browser RUM lite sessions over all hours in the current date for the given org (To be deprecated on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

rum_total_session_count_sum

\n
\n

int64

\n

Shows the sum of RUM sessions (browser and mobile) over all hours in the current date for the given org.

\n
\n \n
\n
\n
\n
\n
\n

rum_units_sum

\n
\n

int64

\n

DEPRECATED: Shows the sum of all browser and mobile RUM units over all hours in the current date for the given org (To be deprecated on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

sca_fargate_count_avg

\n
\n

int64

\n

Shows the average of all Software Composition Analysis Fargate tasks over all hours in the current date for the given org.

\n
\n \n
\n
\n
\n
\n
\n

sca_fargate_count_hwm

\n
\n

int64

\n

Shows the sum of the high-water marks of all Software Composition Analysis Fargate tasks over all hours in the current date for the given org.

\n
\n \n
\n
\n
\n
\n
\n

sds_apm_scanned_bytes_sum

\n
\n

int64

\n

Sum of all APM bytes scanned with sensitive data scanner over all hours in the current date for the given org.

\n
\n \n
\n
\n
\n
\n
\n

sds_events_scanned_bytes_sum

\n
\n

int64

\n

Sum of all event stream events bytes scanned with sensitive data scanner over all hours in the current date for the given org.

\n
\n \n
\n
\n
\n
\n
\n

sds_logs_scanned_bytes_sum

\n
\n

int64

\n

Shows the sum of all bytes scanned of logs usage by the Sensitive Data Scanner over all hours in the current month for the given org.

\n
\n \n
\n
\n
\n
\n
\n

sds_rum_scanned_bytes_sum

\n
\n

int64

\n

Sum of all RUM bytes scanned with sensitive data scanner over all hours in the current date for the given org.

\n
\n \n
\n
\n
\n
\n
\n

sds_total_scanned_bytes_sum

\n
\n

int64

\n

Shows the sum of all bytes scanned across all usage types by the Sensitive Data Scanner over all hours in the current month for the given org.

\n
\n \n
\n
\n
\n
\n
\n

serverless_apps_azure_count_avg

\n
\n

int64

\n

Shows the average of the number of Serverless Apps for Azure for the given date and given org.

\n
\n \n
\n
\n
\n
\n
\n

serverless_apps_google_count_avg

\n
\n

int64

\n

Shows the average of the number of Serverless Apps for Google Cloud for the given date and given org.

\n
\n \n
\n
\n
\n
\n
\n

serverless_apps_total_count_avg

\n
\n

int64

\n

Shows the average of the number of Serverless Apps for Azure and Google Cloud for the given date and given org.

\n
\n \n
\n
\n
\n
\n
\n

siem_analyzed_logs_add_on_count_sum

\n
\n

int64

\n

Shows the sum of all log events analyzed by Cloud SIEM over all hours in the current date for the given org.

\n
\n \n
\n
\n
\n
\n
\n

synthetics_browser_check_calls_count_sum

\n
\n

int64

\n

Shows the sum of all Synthetic browser tests over all hours in the current date for the given org.

\n
\n \n
\n
\n
\n
\n
\n

synthetics_check_calls_count_sum

\n
\n

int64

\n

Shows the sum of all Synthetic API tests over all hours in the current date for the given org.

\n
\n \n
\n
\n
\n
\n
\n

synthetics_mobile_test_runs_sum

\n
\n

int64

\n

Shows the sum of all Synthetic mobile application tests over all hours in the current date for the given org.

\n
\n \n
\n
\n
\n
\n
\n

synthetics_parallel_testing_max_slots_hwm

\n
\n

int64

\n

Shows the high-water mark of used synthetics parallel testing slots over all hours in the current date for the given org.

\n
\n \n
\n
\n
\n
\n
\n

trace_search_indexed_events_count_sum

\n
\n

int64

\n

Shows the sum of all Indexed Spans indexed over all hours in the current date for the given org.

\n
\n \n
\n
\n
\n
\n
\n

twol_ingested_events_bytes_sum

\n
\n

int64

\n

Shows the sum of all ingested APM span bytes over all hours in the current date for the given org.

\n
\n \n
\n
\n
\n
\n
\n

universal_service_monitoring_host_top99p

\n
\n

int64

\n

Shows the 99th percentile of all Universal Service Monitoring hosts over all hours in the current date for the given org.

\n
\n \n
\n
\n
\n
\n
\n

vsphere_host_top99p

\n
\n

int64

\n

Shows the 99th percentile of all vSphere hosts over all hours in the current date for the given org.

\n
\n \n
\n
\n
\n
\n
\n

vuln_management_host_count_top99p

\n
\n

int64

\n

Shows the 99th percentile of all Application Vulnerability Management hosts over all hours in the current date for the given org.

\n
\n \n
\n
\n
\n
\n
\n

workflow_executions_usage_sum

\n
\n

int64

\n

Sum of all workflows executed over all hours in the current date for the given org.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
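
The per-org fields above are returned by the usage metering summary endpoint when organization details are requested. A minimal sketch of one way to fetch them, assuming the `GET /api/v1/usage/summary` endpoint with the `start_month` and `include_org_details` parameters, the standard `DD-API-KEY`/`DD-APPLICATION-KEY` headers, and the `requests` library (site, keys, and the exact response shape should be checked against the generated API reference):

```python
# Sketch: fetch the monthly usage summary with the per-org breakdown.
import os
import requests

DD_SITE = os.environ.get("DD_SITE", "datadoghq.com")  # assumption: US1 site by default

resp = requests.get(
    f"https://api.{DD_SITE}/api/v1/usage/summary",
    headers={
        "DD-API-KEY": os.environ["DD_API_KEY"],
        "DD-APPLICATION-KEY": os.environ["DD_APP_KEY"],
    },
    params={
        "start_month": "2025-01",       # first month to include (ISO-8601, month precision)
        "include_org_details": "true",  # request the per-org fields documented above
    },
    timeout=30,
)
resp.raise_for_status()
summary = resp.json()

# Each entry in `usage` is one month; `orgs` carries the per-org fields
# listed above, for example `infra_host_top99p`.
for month in summary.get("usage", []):
    for org in month.get("orgs", []):
        print(org.get("name"), org.get("infra_host_top99p"))
```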

| Field | Type | Description |
|---|---|---|
| profiling_aas_count_top99p | int64 | Shows the 99th percentile of all profiled Azure app services over all hours in the current date for all organizations. |
| profiling_host_top99p | int64 | Shows the 99th percentile of all profiled hosts over all hours in the current date for all organizations. |
| rum_browser_and_mobile_session_count | int64 | Shows the sum of all mobile sessions and all browser lite and legacy sessions over all hours in the current month for all organizations (To be deprecated on October 1st, 2024). |
| rum_browser_legacy_session_count_sum | int64 | Shows the sum of all browser RUM legacy sessions over all hours in the current date for all organizations (To be introduced on October 1st, 2024). |
| rum_browser_lite_session_count_sum | int64 | Shows the sum of all browser RUM lite sessions over all hours in the current date for all organizations (To be introduced on October 1st, 2024). |
| rum_browser_replay_session_count_sum | int64 | Shows the sum of all browser RUM Session Replay counts over all hours in the current date for all organizations (To be introduced on October 1st, 2024). |
| rum_lite_session_count_sum | int64 | Shows the sum of all RUM lite sessions (browser and mobile) over all hours in the current date for all organizations (To be introduced on October 1st, 2024). |
| rum_mobile_legacy_session_count_android_sum | int64 | Shows the sum of all mobile RUM legacy sessions on Android over all hours in the current date for all organizations (To be introduced on October 1st, 2024). |
| rum_mobile_legacy_session_count_flutter_sum | int64 | Shows the sum of all mobile RUM legacy sessions on Flutter over all hours in the current date for all organizations (To be introduced on October 1st, 2024). |
| rum_mobile_legacy_session_count_ios_sum | int64 | Shows the sum of all mobile RUM legacy sessions on iOS over all hours in the current date for all organizations (To be introduced on October 1st, 2024). |
| rum_mobile_legacy_session_count_reactnative_sum | int64 | Shows the sum of all mobile RUM legacy sessions on React Native over all hours in the current date for all organizations (To be introduced on October 1st, 2024). |
| rum_mobile_legacy_session_count_roku_sum | int64 | Shows the sum of all mobile RUM legacy sessions on Roku over all hours in the current date for all organizations (To be introduced on October 1st, 2024). |
| rum_mobile_lite_session_count_android_sum | int64 | Shows the sum of all mobile RUM lite sessions on Android over all hours in the current date for all organizations (To be introduced on October 1st, 2024). |
| rum_mobile_lite_session_count_flutter_sum | int64 | Shows the sum of all mobile RUM lite sessions on Flutter over all hours in the current date for all organizations (To be introduced on October 1st, 2024). |
| rum_mobile_lite_session_count_ios_sum | int64 | Shows the sum of all mobile RUM lite sessions on iOS over all hours in the current date for all organizations (To be introduced on October 1st, 2024). |
| rum_mobile_lite_session_count_reactnative_sum | int64 | Shows the sum of all mobile RUM lite sessions on React Native over all hours in the current date for all organizations (To be introduced on October 1st, 2024). |
| rum_mobile_lite_session_count_roku_sum | int64 | Shows the sum of all mobile RUM lite sessions on Roku over all hours in the current date for all organizations (To be introduced on October 1st, 2024). |
| rum_replay_session_count_sum | int64 | Shows the sum of all RUM Session Replay counts over all hours in the current date for all organizations (To be introduced on October 1st, 2024). |
| rum_session_count_sum | int64 | DEPRECATED: Shows the sum of all browser RUM lite sessions over all hours in the current date for all organizations (To be deprecated on October 1st, 2024). |
| rum_total_session_count_sum | int64 | Shows the sum of RUM sessions (browser and mobile) over all hours in the current date for all organizations. |
| rum_units_sum | int64 | DEPRECATED: Shows the sum of all browser and mobile RUM units over all hours in the current date for all organizations (To be deprecated on October 1st, 2024). |
| sca_fargate_count_avg | int64 | Shows the average of all Software Composition Analysis Fargate tasks over all hours in the current date for the given org. |
| sca_fargate_count_hwm | int64 | Shows the sum of the high-water marks of all Software Composition Analysis Fargate tasks over all hours in the current date for the given org. |
| sds_apm_scanned_bytes_sum | int64 | Sum of all APM bytes scanned with sensitive data scanner over all hours in the current date for all organizations. |
| sds_events_scanned_bytes_sum | int64 | Sum of all event stream events bytes scanned with sensitive data scanner over all hours in the current date for all organizations. |
| sds_logs_scanned_bytes_sum | int64 | Shows the sum of all bytes scanned of logs usage by the Sensitive Data Scanner over all hours in the current month for all organizations. |
| sds_rum_scanned_bytes_sum | int64 | Sum of all RUM bytes scanned with sensitive data scanner over all hours in the current date for all organizations. |
| sds_total_scanned_bytes_sum | int64 | Shows the sum of all bytes scanned across all usage types by the Sensitive Data Scanner over all hours in the current month for all organizations. |
| serverless_apps_azure_count_avg | int64 | Shows the average of the number of Serverless Apps for Azure for the given date and given org. |
| serverless_apps_google_count_avg | int64 | Shows the average of the number of Serverless Apps for Google Cloud for the given date and given org. |
| serverless_apps_total_count_avg | int64 | Shows the average of the number of Serverless Apps for Azure and Google Cloud for the given date and given org. |
| siem_analyzed_logs_add_on_count_sum | int64 | Shows the sum of all log events analyzed by Cloud SIEM over all hours in the current date for the given org. |
| synthetics_browser_check_calls_count_sum | int64 | Shows the sum of all Synthetic browser tests over all hours in the current date for all organizations. |
| synthetics_check_calls_count_sum | int64 | Shows the sum of all Synthetic API tests over all hours in the current date for all organizations. |
| synthetics_mobile_test_runs_sum | int64 | Shows the sum of all Synthetic mobile application tests over all hours in the current date for all organizations. |
| synthetics_parallel_testing_max_slots_hwm | int64 | Shows the high-water mark of used synthetics parallel testing slots over all hours in the current date for all organizations. |
| trace_search_indexed_events_count_sum | int64 | Shows the sum of all Indexed Spans indexed over all hours in the current date for all organizations. |
| twol_ingested_events_bytes_sum | int64 | Shows the sum of all ingested APM span bytes over all hours in the current date for all organizations. |
| universal_service_monitoring_host_top99p | int64 | Shows the 99th percentile of all universal service management hosts over all hours in the current date for the given org. |
| vsphere_host_top99p | int64 | Shows the 99th percentile of all vSphere hosts over all hours in the current date for all organizations. |
| vuln_management_host_count_top99p | int64 | Shows the 99th percentile of all Application Vulnerability Management hosts over all hours in the current date for the given org. |
| workflow_executions_usage_sum | int64 | Sum of all workflows executed over all hours in the current date for all organizations. |

| Field | Type | Description |
|---|---|---|
| vsphere_host_top99p_sum | int64 | Shows the 99th percentile of all vSphere hosts over all hours in the current month for all organizations. |
| vuln_management_host_count_top99p_sum | int64 | Shows the 99th percentile of all Application Vulnerability Management hosts over all hours in the current month for all organizations. |
| workflow_executions_usage_agg_sum | int64 | Sum of all workflows executed over all hours in the current month for all organizations. |

| Field | Type | Description |
|---|---|---|
| agent_host_top99p_sum | int64 | Shows the 99th percentile of all agent hosts over all hours in the current month for all organizations. |
| apm_azure_app_service_host_top99p_sum | int64 | Shows the 99th percentile of all Azure app services using APM over all hours in the current month for all organizations. |
| apm_devsecops_host_top99p_sum | int64 | Shows the 99th percentile of all APM DevSecOps hosts over all hours in the current month for all organizations. |
| apm_fargate_count_avg_sum | int64 | Shows the average of all APM ECS Fargate tasks over all hours in the current month for all organizations. |
| apm_host_top99p_sum | int64 | Shows the 99th percentile of all distinct APM hosts over all hours in the current month for all organizations. |
| appsec_fargate_count_avg_sum | int64 | Shows the average of all Application Security Monitoring ECS Fargate tasks over all hours in the current month for all organizations. |
| asm_serverless_agg_sum | int64 | Shows the sum of all Application Security Monitoring Serverless invocations over all hours in the current month for all organizations. |
| audit_logs_lines_indexed_agg_sum | int64 | DEPRECATED: Shows the sum of all audit logs lines indexed over all hours in the current month for all organizations (To be deprecated on October 1st, 2024). |
| audit_trail_enabled_hwm_sum | int64 | Shows the total number of organizations that had Audit Trail enabled over a specific number of months. |
| avg_profiled_fargate_tasks_sum | int64 | The average total count for Fargate Container Profiler over all hours in the current month for all organizations. |
| aws_host_top99p_sum | int64 | Shows the 99th percentile of all AWS hosts over all hours in the current month for all organizations. |
| aws_lambda_func_count | int64 | Shows the average of the number of functions that executed 1 or more times each hour in the current month for all organizations. |
| aws_lambda_invocations_sum | int64 | Shows the sum of all AWS Lambda invocations over all hours in the current month for all organizations. |
| azure_app_service_top99p_sum | int64 | Shows the 99th percentile of all Azure app services over all hours in the current month for all organizations. |
| azure_host_top99p_sum | int64 | Shows the 99th percentile of all Azure hosts over all hours in the current month for all organizations. |
| billable_ingested_bytes_agg_sum | int64 | Shows the sum of all log bytes ingested over all hours in the current month for all organizations. |
| browser_rum_lite_session_count_agg_sum | int64 | DEPRECATED: Shows the sum of all browser lite sessions over all hours in the current month for all organizations (To be deprecated on October 1st, 2024). |
| browser_rum_replay_session_count_agg_sum | int64 | Shows the sum of all browser replay sessions over all hours in the current month for all organizations (To be deprecated on October 1st, 2024). |
| browser_rum_units_agg_sum | int64 | DEPRECATED: Shows the sum of all browser RUM units over all hours in the current month for all organizations (To be deprecated on October 1st, 2024). |
| ci_pipeline_indexed_spans_agg_sum | int64 | Shows the sum of all CI pipeline indexed spans over all hours in the current month for all organizations. |
| ci_test_indexed_spans_agg_sum | int64 | Shows the sum of all CI test indexed spans over all hours in the current month for all organizations. |
| ci_visibility_itr_committers_hwm_sum | int64 | Shows the high-water mark of all CI visibility intelligent test runner committers over all hours in the current month for all organizations. |
| ci_visibility_pipeline_committers_hwm_sum | int64 | Shows the high-water mark of all CI visibility pipeline committers over all hours in the current month for all organizations. |
| ci_visibility_test_committers_hwm_sum | int64 | Shows the high-water mark of all CI visibility test committers over all hours in the current month for all organizations. |
| cloud_cost_management_aws_host_count_avg_sum | int64 | Sum of the host count average for Cloud Cost Management for AWS. |
| cloud_cost_management_azure_host_count_avg_sum | int64 | Sum of the host count average for Cloud Cost Management for Azure. |
| cloud_cost_management_gcp_host_count_avg_sum | int64 | Sum of the host count average for Cloud Cost Management for GCP. |
| cloud_cost_management_host_count_avg_sum | int64 | Sum of the host count average for Cloud Cost Management for all cloud providers. |
| cloud_siem_events_agg_sum | int64 | Shows the sum of all Cloud Security Information and Event Management events over all hours in the current month for all organizations. |
| code_analysis_sa_committers_hwm_sum | int64 | Shows the high-water mark of all Static Analysis committers over all hours in the current month for all organizations. |
| code_analysis_sca_committers_hwm_sum | int64 | Shows the high-water mark of all static Software Composition Analysis committers over all hours in the current month for all organizations. |
| code_security_host_top99p_sum | int64 | Shows the 99th percentile of all Code Security hosts over all hours in the current month for all organizations. |
| container_avg_sum | int64 | Shows the average of all distinct containers over all hours in the current month for all organizations. |
| container_excl_agent_avg_sum | int64 | Shows the average of the containers without the Datadog Agent over all hours in the current month for all organizations. |
| container_hwm_sum | int64 | Shows the sum of the high-water marks of all distinct containers over all hours in the current month for all organizations. |
| csm_container_enterprise_compliance_count_agg_sum | int64 | Shows the sum of all Cloud Security Management Enterprise compliance containers over all hours in the current month for all organizations. |
| csm_container_enterprise_cws_count_agg_sum | int64 | Shows the sum of all Cloud Security Management Enterprise Cloud Workload Security containers over all hours in the current month for all organizations. |
| csm_container_enterprise_total_count_agg_sum | int64 | Shows the sum of all Cloud Security Management Enterprise containers over all hours in the current month for all organizations. |
| csm_host_enterprise_aas_host_count_top99p_sum | int64 | Shows the 99th percentile of all Cloud Security Management Enterprise Azure app services hosts over all hours in the current month for all organizations. |
| csm_host_enterprise_aws_host_count_top99p_sum | int64 | Shows the 99th percentile of all Cloud Security Management Enterprise AWS hosts over all hours in the current month for all organizations. |
| csm_host_enterprise_azure_host_count_top99p_sum | int64 | Shows the 99th percentile of all Cloud Security Management Enterprise Azure hosts over all hours in the current month for all organizations. |
| csm_host_enterprise_compliance_host_count_top99p_sum | int64 | Shows the 99th percentile of all Cloud Security Management Enterprise compliance hosts over all hours in the current month for all organizations. |
| csm_host_enterprise_cws_host_count_top99p_sum | int64 | Shows the 99th percentile of all Cloud Security Management Enterprise Cloud Workload Security hosts over all hours in the current month for all organizations. |
| csm_host_enterprise_gcp_host_count_top99p_sum | int64 | Shows the 99th percentile of all Cloud Security Management Enterprise GCP hosts over all hours in the current month for all organizations. |
| csm_host_enterprise_total_host_count_top99p_sum | int64 | Shows the 99th percentile of all Cloud Security Management Enterprise hosts over all hours in the current month for all organizations. |
| cspm_aas_host_top99p_sum | int64 | Shows the 99th percentile of all Cloud Security Management Pro Azure app services hosts over all hours in the current month for all organizations. |
| cspm_aws_host_top99p_sum | int64 | Shows the 99th percentile of all Cloud Security Management Pro AWS hosts over all hours in the current month for all organizations. |
| cspm_azure_host_top99p_sum | int64 | Shows the 99th percentile of all Cloud Security Management Pro Azure hosts over all hours in the current month for all organizations. |
| cspm_container_avg_sum | int64 | Shows the average number of Cloud Security Management Pro containers over all hours in the current month for all organizations. |
| cspm_container_hwm_sum | int64 | Shows the sum of the high-water marks of Cloud Security Management Pro containers over all hours in the current month for all organizations. |
| cspm_gcp_host_top99p_sum | int64 | Shows the 99th percentile of all Cloud Security Management Pro GCP hosts over all hours in the current month for all organizations. |
| cspm_host_top99p_sum | int64 | Shows the 99th percentile of all Cloud Security Management Pro hosts over all hours in the current month for all organizations. |
| custom_historical_ts_sum | int64 | Shows the average number of distinct historical custom metrics over all hours in the current month for all organizations. |
| custom_live_ts_sum | int64 | Shows the average number of distinct live custom metrics over all hours in the current month for all organizations. |
| custom_ts_sum | int64 | Shows the average number of distinct custom metrics over all hours in the current month for all organizations. |
| cws_container_avg_sum | int64 | Shows the average of all distinct Cloud Workload Security containers over all hours in the current month for all organizations. |
| cws_fargate_task_avg_sum | int64 | Shows the average of all distinct Cloud Workload Security Fargate tasks over all hours in the current month for all organizations. |
| cws_host_top99p_sum | int64 | Shows the 99th percentile of all Cloud Workload Security hosts over all hours in the current month for all organizations. |
| data_jobs_monitoring_host_hr_agg_sum | int64 | Shows the sum of Data Jobs Monitoring hosts over all hours in the current month for all organizations. |
| dbm_host_top99p_sum | int64 | Shows the 99th percentile of all Database Monitoring hosts over all hours in the current month for all organizations. |
| dbm_queries_avg_sum | int64 | Shows the average of all distinct Database Monitoring Normalized Queries over all hours in the current month for all organizations. |
| end_date | date-time | Shows the last date of usage in the current month for all organizations. |
| eph_infra_host_agent_agg_sum | int64 | Shows the sum of all ephemeral infrastructure hosts with the Datadog Agent over all hours in the current month for all organizations. |
| eph_infra_host_alibaba_agg_sum | int64 | Shows the sum of all ephemeral infrastructure hosts on Alibaba over all hours in the current month for all organizations. |
| eph_infra_host_aws_agg_sum | int64 | Shows the sum of all ephemeral infrastructure hosts on AWS over all hours in the current month for all organizations. |
| eph_infra_host_azure_agg_sum | int64 | Shows the sum of all ephemeral infrastructure hosts on Azure over all hours in the current month for all organizations. |
| eph_infra_host_ent_agg_sum | int64 | Shows the sum of all ephemeral infrastructure hosts for Enterprise over all hours in the current month for all organizations. |
| eph_infra_host_gcp_agg_sum | int64 | Shows the sum of all ephemeral infrastructure hosts on GCP over all hours in the current month for all organizations. |
| eph_infra_host_heroku_agg_sum | int64 | Shows the sum of all ephemeral infrastructure hosts on Heroku over all hours in the current month for all organizations. |
| eph_infra_host_only_aas_agg_sum | int64 | Shows the sum of all ephemeral infrastructure hosts with only Azure App Services over all hours in the current month for all organizations. |
| eph_infra_host_only_vsphere_agg_sum | int64 | Shows the sum of all ephemeral infrastructure hosts with only vSphere over all hours in the current month for all organizations. |
| eph_infra_host_opentelemetry_agg_sum | int64 | Shows the sum of all ephemeral hosts reported by the Datadog exporter for the OpenTelemetry Collector over all hours in the current month for all organizations. |
| eph_infra_host_opentelemetry_apm_agg_sum | int64 | Shows the sum of all ephemeral APM hosts reported by the Datadog exporter for the OpenTelemetry Collector over all hours in the current month for all organizations. |
| eph_infra_host_pro_agg_sum | int64 | Shows the sum of all ephemeral infrastructure hosts for Pro over all hours in the current month for all organizations. |
| eph_infra_host_proplus_agg_sum | int64 | Shows the sum of all ephemeral infrastructure hosts for Pro Plus over all hours in the current month for all organizations. |
| error_tracking_error_events_agg_sum | int64 | Shows the sum of all Error Tracking error events over all hours in the current month for all organizations. |
| error_tracking_events_agg_sum | int64 | Shows the sum of all Error Tracking events over all hours in the current month for all organizations. |
| error_tracking_rum_error_events_agg_sum | int64 | Shows the sum of all Error Tracking RUM error events over all hours in the current month for all organizations. |
| fargate_container_profiler_profiling_fargate_avg_sum | int64 | The average number of Profiling Fargate tasks over all hours in the current month for all organizations. |
| fargate_container_profiler_profiling_fargate_eks_avg_sum | int64 | The average number of Profiling Fargate Elastic Kubernetes Service tasks over all hours in the current month for all organizations. |
| fargate_tasks_count_avg_sum | int64 | Shows the average of all Fargate tasks over all hours in the current month for all organizations. |
| fargate_tasks_count_hwm_sum | int64 | Shows the sum of the high-water marks of all Fargate tasks over all hours in the current month for all organizations. |
| flex_logs_compute_large_avg_sum | int64 | Shows the average number of Flex Logs Compute Large Instances over all hours in the current month for all organizations. |
| flex_logs_compute_medium_avg_sum | int64 | Shows the average number of Flex Logs Compute Medium Instances over all hours in the current month for all organizations. |
| flex_logs_compute_small_avg_sum | int64 | Shows the average number of Flex Logs Compute Small Instances over all hours in the current month for all organizations. |
| flex_logs_compute_xsmall_avg_sum | int64 | Shows the average number of Flex Logs Compute Extra Small Instances over all hours in the current month for all organizations. |
| flex_logs_starter_avg_sum | int64 | Shows the average number of Flex Logs Starter Instances over all hours in the current month for all organizations. |
| flex_logs_starter_storage_index_avg_sum | int64 | Shows the average number of Flex Logs Starter Storage Index Instances over all hours in the current month for all organizations. |
| flex_logs_starter_storage_retention_adjustment_avg_sum | int64 | Shows the average number of Flex Logs Starter Storage Retention Adjustment Instances over all hours in the current month for all organizations. |
| flex_stored_logs_avg_sum | int64 | Shows the average of all Flex Stored Logs over all hours in the current month for all organizations. |
| forwarding_events_bytes_agg_sum | int64 | Shows the sum of all logs forwarding bytes over all hours in the current month for all organizations (data available as of April 1, 2023). |
| gcp_host_top99p_sum | int64 | Shows the 99th percentile of all GCP hosts over all hours in the current month for all organizations. |
| heroku_host_top99p_sum | int64 | Shows the 99th percentile of all Heroku dynos over all hours in the current month for all organizations. |
| incident_management_monthly_active_users_hwm_sum | int64 | Shows the sum of the high-water marks of incident management monthly active users in the current month for all organizations. |
| indexed_events_count_agg_sum | int64 | DEPRECATED: Shows the sum of all log events indexed over all hours in the current month for all organizations (To be deprecated on October 1st, 2024). |
| infra_host_top99p_sum | int64 | Shows the 99th percentile of all distinct infrastructure hosts over all hours in the current month for all organizations. |
| ingested_events_bytes_agg_sum | int64 | Shows the sum of all log bytes ingested over all hours in the current month for all organizations. |
| iot_device_agg_sum | int64 | Shows the sum of all IoT devices over all hours in the current month for all organizations. |
| iot_device_top99p_sum | int64 | Shows the 99th percentile of all IoT devices over all hours in the current month for all organizations. |
| last_updated | date-time | Shows the most recent hour in the current month for all organizations for which all usages were calculated. |
| live_indexed_events_agg_sum | int64 | DEPRECATED: Shows the sum of all live logs indexed over all hours in the current month for all organizations (To be deprecated on October 1st, 2024). |
| live_ingested_bytes_agg_sum | int64 | Shows the sum of all live logs bytes ingested over all hours in the current month for all organizations (data available as of December 1, 2020). |

- logs_by_retention (object): Object containing logs usage data broken down by retention period.
  - orgs (object): Indexed logs usage summary for each organization for each retention period with usage.
    - usage ([object]): Indexed logs usage summary for each organization.
      - usage ([object]): Indexed logs usage for each active retention for the organization.
        - logs_indexed_logs_usage_sum (int64): Total indexed logs for this retention period.
        - logs_live_indexed_logs_usage_sum (int64): Live indexed logs for this retention period.
        - logs_rehydrated_indexed_logs_usage_sum (int64): Rehydrated indexed logs for this retention period.
        - retention (string): The retention period in days or "custom" for all custom retention periods.
  - usage ([object]): Aggregated index logs usage for each retention period with usage.
    - logs_indexed_logs_usage_agg_sum (int64): Total indexed logs for this retention period.
    - logs_live_indexed_logs_usage_agg_sum (int64): Live indexed logs for this retention period.
    - logs_rehydrated_indexed_logs_usage_agg_sum (int64): Rehydrated indexed logs for this retention period.
    - retention (string): The retention period in days or "custom" for all custom retention periods.
  - usage_by_month (object): Object containing a summary of indexed logs usage by retention period for a single month.
    - date (date-time): The month for the usage.
    - usage ([object]): Indexed logs usage for each active retention for the month.
      - logs_indexed_logs_usage_sum (int64): Total indexed logs for this retention period.
      - logs_live_indexed_logs_usage_sum (int64): Live indexed logs for this retention period.
      - logs_rehydrated_indexed_logs_usage_sum (int64): Rehydrated indexed logs for this retention period.
      - retention (string): The retention period in days or "custom" for all custom retention periods.
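
Given a parsed usage summary response, the nested `logs_by_retention` object above can be flattened into per-retention totals. A short sketch, assuming the response has already been decoded into a Python dict shaped like the structure documented above (the sample values are hypothetical):

```python
# Walk the logs_by_retention structure documented above and print the
# aggregated indexed-logs usage per retention period. The sample dict is
# hypothetical and only mirrors the field names listed above.
logs_by_retention = {
    "usage": [
        {"retention": "15", "logs_indexed_logs_usage_agg_sum": 120_000,
         "logs_live_indexed_logs_usage_agg_sum": 100_000,
         "logs_rehydrated_indexed_logs_usage_agg_sum": 20_000},
        {"retention": "custom", "logs_indexed_logs_usage_agg_sum": 5_000,
         "logs_live_indexed_logs_usage_agg_sum": 5_000,
         "logs_rehydrated_indexed_logs_usage_agg_sum": 0},
    ],
    "orgs": {"usage": []},
    "usage_by_month": {"date": "2025-01-01T00:00:00Z", "usage": []},
}

for row in logs_by_retention.get("usage", []):
    total = row.get("logs_indexed_logs_usage_agg_sum", 0)
    live = row.get("logs_live_indexed_logs_usage_agg_sum", 0)
    rehydrated = row.get("logs_rehydrated_indexed_logs_usage_agg_sum", 0)
    print(f"retention={row.get('retention')}: total={total} "
          f"(live={live}, rehydrated={rehydrated})")
```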

mobile_rum_lite_session_count_agg_sum

\n
\n

int64

\n

DEPRECATED: Shows the sum of all mobile lite sessions over all hours in the current month for all organizations (To be deprecated on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

mobile_rum_session_count_agg_sum

\n
\n

int64

\n

DEPRECATED: Shows the sum of all mobile RUM sessions over all hours in the current month for all organizations (To be deprecated on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

mobile_rum_session_count_android_agg_sum

\n
\n

int64

\n

DEPRECATED: Shows the sum of all mobile RUM sessions on Android over all hours in the current month for all organizations (To be deprecated on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

mobile_rum_session_count_flutter_agg_sum

\n
\n

int64

\n

DEPRECATED: Shows the sum of all mobile RUM sessions on Flutter over all hours in the current month for all organizations (To be deprecated on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

mobile_rum_session_count_ios_agg_sum

\n
\n

int64

\n

DEPRECATED: Shows the sum of all mobile RUM sessions on iOS over all hours in the current month for all organizations (To be deprecated on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

mobile_rum_session_count_reactnative_agg_sum

\n
\n

int64

\n

DEPRECATED: Shows the sum of all mobile RUM sessions on React Native over all hours in the current month for all organizations (To be deprecated on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

mobile_rum_session_count_roku_agg_sum

\n
\n

int64

\n

DEPRECATED: Shows the sum of all mobile RUM sessions on Roku over all hours in the current month for all organizations (To be deprecated on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

mobile_rum_units_agg_sum

\n
\n

int64

\n

DEPRECATED: Shows the sum of all mobile RUM units over all hours in the current month for all organizations (To be deprecated on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

ndm_netflow_events_agg_sum

\n
\n

int64

\n

Shows the sum of all Network Device Monitoring NetFlow events over all hours in the current month for all organizations.

\n
\n \n
\n
\n
\n
\n
\n

netflow_indexed_events_count_agg_sum

\n
\n

int64

\n

DEPRECATED: Shows the sum of all Network flows indexed over all hours in the current month for all organizations (To be deprecated on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

npm_host_top99p_sum

\n
\n

int64

\n

Shows the 99th percentile of all distinct Cloud Network Monitoring hosts (formerly known as Network hosts) over all hours in the current month for all organizations.

\n
\n \n
\n
\n
\n
\n
\n

observability_pipelines_bytes_processed_agg_sum

\n
\n

int64

\n

Sum of all observability pipelines bytes processed over all hours in the current month for all organizations.

\n
\n \n
\n
\n
\n
\n
\n

oci_host_agg_sum

\n
\n

int64

\n

Shows the sum of Oracle Cloud Infrastructure hosts over all hours in the current months for all organizations

\n
\n \n
\n
\n
\n
\n
\n

oci_host_top99p_sum

\n
\n

int64

\n

Shows the 99th percentile of Oracle Cloud Infrastructure hosts over all hours in the current months for all organizations

\n
\n \n
\n
\n
\n
\n
\n

online_archive_events_count_agg_sum

\n
\n

int64

\n

Sum of all online archived events over all hours in the current month for all organizations.

\n
\n \n
\n
\n
\n
\n
\n

opentelemetry_apm_host_top99p_sum

\n
\n

int64

\n

Shows the 99th percentile of APM hosts reported by the Datadog exporter for the OpenTelemetry Collector over all hours in the current month for all organizations.

\n
\n \n
\n
\n
\n
\n
\n

opentelemetry_host_top99p_sum

\n
\n

int64

\n

Shows the 99th percentile of all hosts reported by the Datadog exporter for the OpenTelemetry Collector over all hours in the current month for all organizations.

\n
\n \n
\n
\n
\n
\n
\n

profiling_aas_count_top99p_sum

\n
\n

int64

\n

Shows the 99th percentile of all profiled Azure app services over all hours in the current month for all organizations.

\n
\n \n
\n
\n
\n
\n
\n

profiling_container_agent_count_avg

\n
\n

int64

\n

Shows the average number of profiled containers over all hours in the current month for all organizations.

\n
\n \n
\n
\n
\n
\n
\n

profiling_host_count_top99p_sum

\n
\n

int64

\n

Shows the 99th percentile of all profiled hosts over all hours in the current month for all organizations.

\n
\n \n
\n
\n
\n
\n
\n

rehydrated_indexed_events_agg_sum

\n
\n

int64

\n

DEPRECATED: Shows the sum of all rehydrated logs indexed over all hours in the current month for all organizations (To be deprecated on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

rehydrated_ingested_bytes_agg_sum

\n
\n

int64

\n

Shows the sum of all rehydrated logs bytes ingested over all hours in the current month for all organizations (data available as of December 1, 2020).

\n
\n \n
\n
\n
\n
\n
\n

rum_browser_and_mobile_session_count

\n
\n

int64

\n

Shows the sum of all mobile sessions and all browser lite and legacy sessions over all hours in the current month for all organizations (To be deprecated on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

rum_browser_legacy_session_count_agg_sum

\n
\n

int64

\n

Shows the sum of all browser RUM legacy sessions over all hours in the current month for all organizations (To be introduced on October 1st, 2024).

rum_browser_lite_session_count_agg_sum (int64): Shows the sum of all browser RUM lite sessions over all hours in the current month for all organizations (To be introduced on October 1st, 2024).
rum_browser_replay_session_count_agg_sum (int64): Shows the sum of all browser RUM Session Replay counts over all hours in the current month for all organizations (To be introduced on October 1st, 2024).
rum_lite_session_count_agg_sum (int64): Shows the sum of all RUM lite sessions (browser and mobile) over all hours in the current month for all organizations (To be introduced on October 1st, 2024).
rum_mobile_legacy_session_count_android_agg_sum (int64): Shows the sum of all mobile RUM legacy sessions on Android over all hours in the current month for all organizations (To be introduced on October 1st, 2024).
rum_mobile_legacy_session_count_flutter_agg_sum (int64): Shows the sum of all mobile RUM legacy sessions on Flutter over all hours in the current month for all organizations (To be introduced on October 1st, 2024).
rum_mobile_legacy_session_count_ios_agg_sum (int64): Shows the sum of all mobile RUM legacy sessions on iOS over all hours in the current month for all organizations (To be introduced on October 1st, 2024).
rum_mobile_legacy_session_count_reactnative_agg_sum (int64): Shows the sum of all mobile RUM legacy sessions on React Native over all hours in the current month for all organizations (To be introduced on October 1st, 2024).
rum_mobile_legacy_session_count_roku_agg_sum (int64): Shows the sum of all mobile RUM legacy sessions on Roku over all hours in the current month for all organizations (To be introduced on October 1st, 2024).
rum_mobile_lite_session_count_android_agg_sum (int64): Shows the sum of all mobile RUM lite sessions on Android over all hours in the current month for all organizations (To be introduced on October 1st, 2024).
rum_mobile_lite_session_count_flutter_agg_sum (int64): Shows the sum of all mobile RUM lite sessions on Flutter over all hours in the current month for all organizations (To be introduced on October 1st, 2024).
rum_mobile_lite_session_count_ios_agg_sum (int64): Shows the sum of all mobile RUM lite sessions on iOS over all hours in the current month for all organizations (To be introduced on October 1st, 2024).
rum_mobile_lite_session_count_reactnative_agg_sum (int64): Shows the sum of all mobile RUM lite sessions on React Native over all hours in the current month for all organizations (To be introduced on October 1st, 2024).
rum_mobile_lite_session_count_roku_agg_sum (int64): Shows the sum of all mobile RUM lite sessions on Roku over all hours within the current month for all organizations (To be introduced on October 1st, 2024).
rum_mobile_replay_session_count_android_agg_sum (int64): Shows the sum of all mobile RUM replay sessions on Android over all hours within the current month for all organizations.
rum_mobile_replay_session_count_ios_agg_sum (int64): Shows the sum of all mobile RUM replay sessions on iOS over all hours within the current month for all organizations.
rum_mobile_replay_session_count_reactnative_agg_sum (int64): Shows the sum of all mobile RUM replay sessions on React Native over all hours within the current month for all organizations.
rum_replay_session_count_agg_sum (int64): Shows the sum of all RUM Session Replay counts over all hours in the current month for all organizations (To be introduced on October 1st, 2024).
rum_session_count_agg_sum (int64): DEPRECATED: Shows the sum of all browser RUM lite sessions over all hours in the current month for all organizations (To be deprecated on October 1st, 2024).
rum_total_session_count_agg_sum (int64): Shows the sum of RUM sessions (browser and mobile) over all hours in the current month for all organizations.
rum_units_agg_sum (int64): DEPRECATED: Shows the sum of all browser and mobile RUM units over all hours in the current month for all organizations (To be deprecated on October 1st, 2024).
sca_fargate_count_avg_sum (int64): Shows the average of all Software Composition Analysis Fargate tasks over all hours in the current month for all organizations.
sca_fargate_count_hwm_sum (int64): Shows the sum of the high-water marks of all Software Composition Analysis Fargate tasks over all hours in the current month for all organizations.
sds_apm_scanned_bytes_sum (int64): Sum of all APM bytes scanned with sensitive data scanner in the current month for all organizations.
sds_events_scanned_bytes_sum (int64): Sum of all event stream events bytes scanned with sensitive data scanner in the current month for all organizations.
sds_logs_scanned_bytes_sum (int64): Shows the sum of all bytes scanned of logs usage by the Sensitive Data Scanner over all hours in the current month for all organizations.
sds_rum_scanned_bytes_sum (int64): Sum of all RUM bytes scanned with sensitive data scanner in the current month for all organizations.
sds_total_scanned_bytes_sum (int64): Shows the sum of all bytes scanned across all usage types by the Sensitive Data Scanner over all hours in the current month for all organizations.
serverless_apps_azure_count_avg_sum (int64): Sum of the average number of Serverless Apps for Azure in the current month for all organizations.
serverless_apps_google_count_avg_sum (int64): Sum of the average number of Serverless Apps for Google Cloud in the current month for all organizations.
serverless_apps_total_count_avg_sum (int64): Sum of the average number of Serverless Apps for Azure and Google Cloud in the current month for all organizations.
siem_analyzed_logs_add_on_count_agg_sum (int64): Shows the sum of all log events analyzed by Cloud SIEM over all hours in the current month for all organizations.
start_date (date-time): Shows the first date of usage in the current month for all organizations.
synthetics_browser_check_calls_count_agg_sum (int64): Shows the sum of all Synthetic browser tests over all hours in the current month for all organizations.
synthetics_check_calls_count_agg_sum (int64): Shows the sum of all Synthetic API tests over all hours in the current month for all organizations.
synthetics_mobile_test_runs_agg_sum (int64): Shows the sum of Synthetic mobile application tests over all hours in the current month for all organizations.
synthetics_parallel_testing_max_slots_hwm_sum (int64): Shows the sum of the high-water marks of used synthetics parallel testing slots over all hours in the current month for all organizations.
trace_search_indexed_events_count_agg_sum (int64): Shows the sum of all Indexed Spans indexed over all hours in the current month for all organizations.
twol_ingested_events_bytes_agg_sum (int64): Shows the sum of all ingested APM span bytes over all hours in the current month for all organizations.
universal_service_monitoring_host_top99p_sum (int64): Shows the 99th percentile of all Universal Service Monitoring hosts over all hours in the current month for all organizations.
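All of the aggregate fields above, plus the `usage` and `orgs` breakdowns described next, are plain keys in the JSON response body. As a minimal sketch, assuming the summary is served at `GET /api/v1/usage/summary`, authenticated with `DD-API-KEY`/`DD-APPLICATION-KEY` headers, and selected with a `start_month` query parameter (the path, parameter, and environment variable names here are assumptions; check the endpoint reference or use the official client for the authoritative signature), the fields can be read like this:

```python
# Hypothetical sketch: endpoint path, query parameter, and env var names are
# assumptions; substitute the official client or documented values as needed.
import os

import requests

DD_SITE = os.environ.get("DD_SITE", "datadoghq.com")

resp = requests.get(
    f"https://api.{DD_SITE}/api/v1/usage/summary",
    headers={
        "DD-API-KEY": os.environ["DD_API_KEY"],
        "DD-APPLICATION-KEY": os.environ["DD_APP_KEY"],
    },
    params={"start_month": "2024-10"},  # assumed YYYY-MM month selector
    timeout=30,
)
resp.raise_for_status()
summary = resp.json()

# Month-to-date aggregates listed above.
print("RUM total sessions:", summary.get("rum_total_session_count_agg_sum"))
print("SDS bytes scanned:", summary.get("sds_total_scanned_bytes_sum"))

# The `usage` array documented below has one object per date, each optionally
# carrying a per-organization breakdown under `orgs`.
for day in summary.get("usage", []):
    print(day.get("date"), "infra hosts p99:", day.get("infra_host_top99p"))
    for org in day.get("orgs", []):
        print("  ", org.get("name"), org.get("infra_host_top99p"))
```

The same field naming applies per date inside `usage` and per organization inside `orgs`, as the listings below spell out.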
usage ([object]): An array of objects regarding hourly usage. Each object in the array includes the following fields:
  agent_host_top99p (int64): Shows the 99th percentile of all agent hosts over all hours in the current date for all organizations.
  apm_azure_app_service_host_top99p (int64): Shows the 99th percentile of all Azure app services using APM over all hours in the current date for all organizations.
  apm_devsecops_host_top99p (int64): Shows the 99th percentile of all APM DevSecOps hosts over all hours in the current date for the given org.
  apm_fargate_count_avg (int64): Shows the average of all APM ECS Fargate tasks over all hours in the current date for all organizations.
  apm_host_top99p (int64): Shows the 99th percentile of all distinct APM hosts over all hours in the current date for all organizations.
  appsec_fargate_count_avg (int64): Shows the average of all Application Security Monitoring ECS Fargate tasks over all hours in the current date for all organizations.
  asm_serverless_sum (int64): Shows the sum of all Application Security Monitoring Serverless invocations over all hours in the current date for all organizations.
  audit_logs_lines_indexed_sum (int64): DEPRECATED: Shows the sum of audit logs lines indexed over all hours in the current date for all organizations (To be deprecated on October 1st, 2024).
  audit_trail_enabled_hwm (int64): Shows the number of organizations that had Audit Trail enabled in the current date.
  avg_profiled_fargate_tasks (int64): The average total count for Fargate Container Profiler over all hours in the current date for all organizations.
  aws_host_top99p (int64): Shows the 99th percentile of all AWS hosts over all hours in the current date for all organizations.
  aws_lambda_func_count (int64): Shows the average of the number of functions that executed 1 or more times each hour in the current date for all organizations.
  aws_lambda_invocations_sum (int64): Shows the sum of all AWS Lambda invocations over all hours in the current date for all organizations.
  azure_app_service_top99p (int64): Shows the 99th percentile of all Azure app services over all hours in the current date for all organizations.
  billable_ingested_bytes_sum (int64): Shows the sum of all log bytes ingested over all hours in the current date for all organizations.
  browser_rum_lite_session_count_sum (int64): DEPRECATED: Shows the sum of all browser lite sessions over all hours in the current date for all organizations (To be deprecated on October 1st, 2024).
  browser_rum_replay_session_count_sum (int64): Shows the sum of all browser replay sessions over all hours in the current date for all organizations (To be deprecated on October 1st, 2024).
  browser_rum_units_sum (int64): DEPRECATED: Shows the sum of all browser RUM units over all hours in the current date for all organizations (To be deprecated on October 1st, 2024).
  ci_pipeline_indexed_spans_sum (int64): Shows the sum of all CI pipeline indexed spans over all hours in the current month for all organizations.
  ci_test_indexed_spans_sum (int64): Shows the sum of all CI test indexed spans over all hours in the current month for all organizations.
  ci_visibility_itr_committers_hwm (int64): Shows the high-water mark of all CI visibility intelligent test runner committers over all hours in the current month for all organizations.
  ci_visibility_pipeline_committers_hwm (int64): Shows the high-water mark of all CI visibility pipeline committers over all hours in the current month for all organizations.
  ci_visibility_test_committers_hwm (int64): Shows the high-water mark of all CI visibility test committers over all hours in the current month for all organizations.
  cloud_cost_management_aws_host_count_avg (int64): Host count average of Cloud Cost Management for AWS for the given date and given organization.
  cloud_cost_management_azure_host_count_avg (int64): Host count average of Cloud Cost Management for Azure for the given date and given organization.
  cloud_cost_management_gcp_host_count_avg (int64): Host count average of Cloud Cost Management for GCP for the given date and given organization.
  cloud_cost_management_host_count_avg (int64): Host count average of Cloud Cost Management for all cloud providers for the given date and given organization.
  cloud_siem_events_sum (int64): Shows the sum of all Cloud Security Information and Event Management events over all hours in the current date for the given org.
  code_analysis_sa_committers_hwm (int64): Shows the high-water mark of all Static Analysis committers over all hours in the current date for the given org.
  code_analysis_sca_committers_hwm (int64): Shows the high-water mark of all static Software Composition Analysis committers over all hours in the current date for the given org.
  code_security_host_top99p (int64): Shows the 99th percentile of all Code Security hosts over all hours in the current date for the given org.
  container_avg (int64): Shows the average of all distinct containers over all hours in the current date for all organizations.
  container_excl_agent_avg (int64): Shows the average of containers without the Datadog Agent over all hours in the current date for all organizations.
  container_hwm (int64): Shows the high-water mark of all distinct containers over all hours in the current date for all organizations.
  csm_container_enterprise_compliance_count_sum (int64): Shows the sum of all Cloud Security Management Enterprise compliance containers over all hours in the current date for the given org.
  csm_container_enterprise_cws_count_sum (int64): Shows the sum of all Cloud Security Management Enterprise Cloud Workload Security containers over all hours in the current date for the given org.
  csm_container_enterprise_total_count_sum (int64): Shows the sum of all Cloud Security Management Enterprise containers over all hours in the current date for the given org.
  csm_host_enterprise_aas_host_count_top99p (int64): Shows the 99th percentile of all Cloud Security Management Enterprise Azure app services hosts over all hours in the current date for the given org.
  csm_host_enterprise_aws_host_count_top99p (int64): Shows the 99th percentile of all Cloud Security Management Enterprise AWS hosts over all hours in the current date for the given org.
  csm_host_enterprise_azure_host_count_top99p (int64): Shows the 99th percentile of all Cloud Security Management Enterprise Azure hosts over all hours in the current date for the given org.
  csm_host_enterprise_compliance_host_count_top99p (int64): Shows the 99th percentile of all Cloud Security Management Enterprise compliance hosts over all hours in the current date for the given org.
  csm_host_enterprise_cws_host_count_top99p (int64): Shows the 99th percentile of all Cloud Security Management Enterprise Cloud Workload Security hosts over all hours in the current date for the given org.
  csm_host_enterprise_gcp_host_count_top99p (int64): Shows the 99th percentile of all Cloud Security Management Enterprise GCP hosts over all hours in the current date for the given org.
  csm_host_enterprise_total_host_count_top99p (int64): Shows the 99th percentile of all Cloud Security Management Enterprise hosts over all hours in the current date for the given org.
  cspm_aas_host_top99p (int64): Shows the 99th percentile of all Cloud Security Management Pro Azure app services hosts over all hours in the current date for all organizations.
  cspm_aws_host_top99p (int64): Shows the 99th percentile of all Cloud Security Management Pro AWS hosts over all hours in the current date for all organizations.
  cspm_azure_host_top99p (int64): Shows the 99th percentile of all Cloud Security Management Pro Azure hosts over all hours in the current date for all organizations.
  cspm_container_avg (int64): Shows the average number of Cloud Security Management Pro containers over all hours in the current date for all organizations.
  cspm_container_hwm (int64): Shows the high-water mark of Cloud Security Management Pro containers over all hours in the current date for all organizations.
  cspm_gcp_host_top99p (int64): Shows the 99th percentile of all Cloud Security Management Pro GCP hosts over all hours in the current date for all organizations.
  cspm_host_top99p (int64): Shows the 99th percentile of all Cloud Security Management Pro hosts over all hours in the current date for all organizations.
  custom_ts_avg (int64): Shows the average number of distinct custom metrics over all hours in the current date for all organizations.
  cws_container_count_avg (int64): Shows the average of all distinct Cloud Workload Security containers over all hours in the current date for all organizations.
  cws_fargate_task_avg (int64): Shows the average of all distinct Cloud Workload Security Fargate tasks over all hours in the current date for all organizations.
  cws_host_top99p (int64): Shows the 99th percentile of all Cloud Workload Security hosts over all hours in the current date for all organizations.
  data_jobs_monitoring_host_hr_sum (int64): Shows the sum of all Data Jobs Monitoring hosts over all hours in the current date for the given org.
  date (date-time): The date for the usage.
  dbm_host_top99p (int64): Shows the 99th percentile of all Database Monitoring hosts over all hours in the current date for all organizations.
  dbm_queries_count_avg (int64): Shows the average of all normalized Database Monitoring queries over all hours in the current date for all organizations.
  eph_infra_host_agent_sum (int64): Shows the sum of all ephemeral infrastructure hosts with the Datadog Agent over all hours in the current date for the given org.
  eph_infra_host_alibaba_sum (int64): Shows the sum of all ephemeral infrastructure hosts on Alibaba over all hours in the current date for the given org.
  eph_infra_host_aws_sum (int64): Shows the sum of all ephemeral infrastructure hosts on AWS over all hours in the current date for the given org.
  eph_infra_host_azure_sum (int64): Shows the sum of all ephemeral infrastructure hosts on Azure over all hours in the current date for the given org.
  eph_infra_host_ent_sum (int64): Shows the sum of all ephemeral infrastructure hosts for Enterprise over all hours in the current date for the given org.
  eph_infra_host_gcp_sum (int64): Shows the sum of all ephemeral infrastructure hosts on GCP over all hours in the current date for the given org.
  eph_infra_host_heroku_sum (int64): Shows the sum of all ephemeral infrastructure hosts on Heroku over all hours in the current date for the given org.
  eph_infra_host_only_aas_sum (int64): Shows the sum of all ephemeral infrastructure hosts with only Azure App Services over all hours in the current date for the given org.
  eph_infra_host_only_vsphere_sum (int64): Shows the sum of all ephemeral infrastructure hosts with only vSphere over all hours in the current date for the given org.
  eph_infra_host_opentelemetry_apm_sum (int64): Shows the sum of all ephemeral APM hosts reported by the Datadog exporter for the OpenTelemetry Collector over all hours in the current date for the given org.
  eph_infra_host_opentelemetry_sum (int64): Shows the sum of all ephemeral hosts reported by the Datadog exporter for the OpenTelemetry Collector over all hours in the current date for the given org.
  eph_infra_host_pro_sum (int64): Shows the sum of all ephemeral infrastructure hosts for Pro over all hours in the current date for the given org.
  eph_infra_host_proplus_sum (int64): Shows the sum of all ephemeral infrastructure hosts for Pro Plus over all hours in the current date for the given org.
  error_tracking_error_events_sum (int64): Shows the sum of all Error Tracking error events over all hours in the current date for the given org.
  error_tracking_events_sum (int64): Shows the sum of all Error Tracking events over all hours in the current date for the given org.
  error_tracking_rum_error_events_sum (int64): Shows the sum of all Error Tracking RUM error events over all hours in the current date for the given org.
  fargate_container_profiler_profiling_fargate_avg (int64): The average number of Profiling Fargate tasks over all hours in the current date for all organizations.
  fargate_container_profiler_profiling_fargate_eks_avg (int64): The average number of Profiling Fargate Elastic Kubernetes Service tasks over all hours in the current date for all organizations.
  fargate_tasks_count_avg (int64): Shows the high-watermark of all Fargate tasks over all hours in the current date for all organizations.
  fargate_tasks_count_hwm (int64): Shows the average of all Fargate tasks over all hours in the current date for all organizations.
  flex_logs_compute_large_avg (int64): Shows the average number of Flex Logs Compute Large Instances over all hours in the current date for the given org.
  flex_logs_compute_medium_avg (int64): Shows the average number of Flex Logs Compute Medium Instances over all hours in the current date for the given org.
  flex_logs_compute_small_avg (int64): Shows the average number of Flex Logs Compute Small Instances over all hours in the current date for the given org.
  flex_logs_compute_xsmall_avg (int64): Shows the average number of Flex Logs Compute Extra Small Instances over all hours in the current date for the given org.
  flex_logs_starter_avg (int64): Shows the average number of Flex Logs Starter Instances over all hours in the current date for the given org.
  flex_logs_starter_storage_index_avg (int64): Shows the average number of Flex Logs Starter Storage Index Instances over all hours in the current date for the given org.
  flex_logs_starter_storage_retention_adjustment_avg (int64): Shows the average number of Flex Logs Starter Storage Retention Adjustment Instances over all hours in the current date for the given org.
  flex_stored_logs_avg (int64): Shows the average of all Flex Stored Logs over all hours in the current date for the given org.
  forwarding_events_bytes_sum (int64): Shows the sum of all log bytes forwarded over all hours in the current date for all organizations.
  gcp_host_top99p (int64): Shows the 99th percentile of all GCP hosts over all hours in the current date for all organizations.
  heroku_host_top99p (int64): Shows the 99th percentile of all Heroku dynos over all hours in the current date for all organizations.
  incident_management_monthly_active_users_hwm (int64): Shows the high-water mark of incident management monthly active users over all hours in the current date for all organizations.
  indexed_events_count_sum (int64): Shows the sum of all log events indexed over all hours in the current date for all organizations.
  infra_host_top99p (int64): Shows the 99th percentile of all distinct infrastructure hosts over all hours in the current date for all organizations.
  ingested_events_bytes_sum (int64): Shows the sum of all log bytes ingested over all hours in the current date for all organizations.
  iot_device_sum (int64): Shows the sum of all IoT devices over all hours in the current date for all organizations.
  iot_device_top99p (int64): Shows the 99th percentile of all IoT devices over all hours in the current date for all organizations.
  mobile_rum_lite_session_count_sum (int64): DEPRECATED: Shows the sum of all mobile lite sessions over all hours in the current date for all organizations (To be deprecated on October 1st, 2024).
  mobile_rum_session_count_android_sum (int64): DEPRECATED: Shows the sum of all mobile RUM sessions on Android over all hours in the current date for all organizations (To be deprecated on October 1st, 2024).
  mobile_rum_session_count_flutter_sum (int64): DEPRECATED: Shows the sum of all mobile RUM sessions on Flutter over all hours in the current date for all organizations (To be deprecated on October 1st, 2024).
  mobile_rum_session_count_ios_sum (int64): DEPRECATED: Shows the sum of all mobile RUM sessions on iOS over all hours in the current date for all organizations (To be deprecated on October 1st, 2024).
  mobile_rum_session_count_reactnative_sum (int64): DEPRECATED: Shows the sum of all mobile RUM sessions on React Native over all hours in the current date for all organizations (To be deprecated on October 1st, 2024).
  mobile_rum_session_count_roku_sum (int64): DEPRECATED: Shows the sum of all mobile RUM sessions on Roku over all hours in the current date for all organizations (To be deprecated on October 1st, 2024).
  mobile_rum_session_count_sum (int64): DEPRECATED: Shows the sum of all mobile RUM sessions over all hours in the current date for all organizations (To be deprecated on October 1st, 2024).
  mobile_rum_units_sum (int64): DEPRECATED: Shows the sum of all mobile RUM units over all hours in the current date for all organizations (To be deprecated on October 1st, 2024).
  ndm_netflow_events_sum (int64): Shows the sum of all Network Device Monitoring NetFlow events over all hours in the current date for the given org.
  netflow_indexed_events_count_sum (int64): DEPRECATED: Shows the sum of all Network flows indexed over all hours in the current date for all organizations (To be deprecated on October 1st, 2024).
  npm_host_top99p (int64): Shows the 99th percentile of all distinct Cloud Network Monitoring hosts (formerly known as Network hosts) over all hours in the current date for all organizations.
  observability_pipelines_bytes_processed_sum (int64): Sum of all observability pipelines bytes processed over all hours in the current date for the given org.
  oci_host_sum (int64): Shows the sum of all Oracle Cloud Infrastructure hosts over all hours in the current date for the given org.
  oci_host_top99p (int64): Shows the 99th percentile of all Oracle Cloud Infrastructure hosts over all hours in the current date for the given org.
  online_archive_events_count_sum (int64): Sum of all online archived events over all hours in the current date for all organizations.
  opentelemetry_apm_host_top99p (int64): Shows the 99th percentile of APM hosts reported by the Datadog exporter for the OpenTelemetry Collector over all hours in the current date for all organizations.
  opentelemetry_host_top99p (int64): Shows the 99th percentile of all hosts reported by the Datadog exporter for the OpenTelemetry Collector over all hours in the current date for all organizations.

  orgs ([object]): Organizations associated with a user. Each organization object includes the following fields:
    account_name (string): The account name.
    account_public_id (string): The account public id.
    agent_host_top99p (int64): Shows the 99th percentile of all agent hosts over all hours in the current date for the given org.
    apm_azure_app_service_host_top99p (int64): Shows the 99th percentile of all Azure app services using APM over all hours in the current date for the given org.
    apm_devsecops_host_top99p (int64): Shows the 99th percentile of all APM DevSecOps hosts over all hours in the current date for the given org.
    apm_fargate_count_avg (int64): Shows the average of all APM ECS Fargate tasks over all hours in the current month for the given org.
    apm_host_top99p (int64): Shows the 99th percentile of all distinct APM hosts over all hours in the current date for the given org.
    appsec_fargate_count_avg (int64): Shows the average of all Application Security Monitoring ECS Fargate tasks over all hours in the current month for the given org.
    asm_serverless_sum (int64): Shows the sum of all Application Security Monitoring Serverless invocations over all hours in the current month for the given org.
    audit_logs_lines_indexed_sum (int64): DEPRECATED: Shows the sum of all audit logs lines indexed over all hours in the current date for the given org (To be deprecated on October 1st, 2024).
    audit_trail_enabled_hwm (int64): Shows whether Audit Trail is enabled for the current date for the given org.
    avg_profiled_fargate_tasks (int64): The average total count for Fargate Container Profiler over all hours in the current month for the given org.
    aws_host_top99p (int64): Shows the 99th percentile of all AWS hosts over all hours in the current date for the given org.
    aws_lambda_func_count (int64): Shows the sum of all AWS Lambda invocations over all hours in the current date for the given org.
    aws_lambda_invocations_sum (int64): Shows the sum of all AWS Lambda invocations over all hours in the current date for the given org.
    azure_app_service_top99p (int64): Shows the 99th percentile of all Azure app services over all hours in the current date for the given org.
    billable_ingested_bytes_sum (int64): Shows the sum of all log bytes ingested over all hours in the current date for the given org.
    browser_rum_lite_session_count_sum (int64): DEPRECATED: Shows the sum of all browser lite sessions over all hours in the current date for the given org (To be deprecated on October 1st, 2024).
    browser_rum_replay_session_count_sum (int64): Shows the sum of all browser replay sessions over all hours in the current date for the given org (To be deprecated on October 1st, 2024).
    browser_rum_units_sum (int64): DEPRECATED: Shows the sum of all browser RUM units over all hours in the current date for the given org (To be deprecated on October 1st, 2024).
    ci_pipeline_indexed_spans_sum (int64): Shows the sum of all CI pipeline indexed spans over all hours in the current date for the given org.
    ci_test_indexed_spans_sum (int64): Shows the sum of all CI test indexed spans over all hours in the current date for the given org.
    ci_visibility_itr_committers_hwm (int64): Shows the high-water mark of all CI visibility intelligent test runner committers over all hours in the current date for the given org.
    ci_visibility_pipeline_committers_hwm (int64): Shows the high-water mark of all CI visibility pipeline committers over all hours in the current date for the given org.
    ci_visibility_test_committers_hwm (int64): Shows the high-water mark of all CI visibility test committers over all hours in the current date for the given org.
    cloud_cost_management_aws_host_count_avg (int64): Host count average of Cloud Cost Management for AWS for the given date and given org.
    cloud_cost_management_azure_host_count_avg (int64): Host count average of Cloud Cost Management for Azure for the given date and given org.
    cloud_cost_management_gcp_host_count_avg (int64): Host count average of Cloud Cost Management for GCP for the given date and given org.
    cloud_cost_management_host_count_avg (int64): Host count average of Cloud Cost Management for all cloud providers for the given date and given org.
    cloud_siem_events_sum (int64): Shows the sum of all Cloud Security Information and Event Management events over all hours in the current date for the given org.
    code_analysis_sa_committers_hwm (int64): Shows the high-water mark of all Static Analysis committers over all hours in the current date for the given org.
    code_analysis_sca_committers_hwm (int64): Shows the high-water mark of all static Software Composition Analysis committers over all hours in the current date for the given org.
    code_security_host_top99p (int64): Shows the 99th percentile of all Code Security hosts over all hours in the current date for the given org.
    container_avg (int64): Shows the average of all distinct containers over all hours in the current date for the given org.
    container_excl_agent_avg (int64): Shows the average of containers without the Datadog Agent over all hours in the current date for the given organization.
    container_hwm (int64): Shows the high-water mark of all distinct containers over all hours in the current date for the given org.
    csm_container_enterprise_compliance_count_sum (int64): Shows the sum of all Cloud Security Management Enterprise compliance containers over all hours in the current date for the given org.
    csm_container_enterprise_cws_count_sum (int64): Shows the sum of all Cloud Security Management Enterprise Cloud Workload Security containers over all hours in the current date for the given org.
    csm_container_enterprise_total_count_sum (int64): Shows the sum of all Cloud Security Management Enterprise containers over all hours in the current date for the given org.
    csm_host_enterprise_aas_host_count_top99p (int64): Shows the 99th percentile of all Cloud Security Management Enterprise Azure app services hosts over all hours in the current date for the given org.
    csm_host_enterprise_aws_host_count_top99p (int64): Shows the 99th percentile of all Cloud Security Management Enterprise AWS hosts over all hours in the current date for the given org.
    csm_host_enterprise_azure_host_count_top99p (int64): Shows the 99th percentile of all Cloud Security Management Enterprise Azure hosts over all hours in the current date for the given org.
    csm_host_enterprise_compliance_host_count_top99p (int64): Shows the 99th percentile of all Cloud Security Management Enterprise compliance hosts over all hours in the current date for the given org.
    csm_host_enterprise_cws_host_count_top99p (int64): Shows the 99th percentile of all Cloud Security Management Enterprise Cloud Workload Security hosts over all hours in the current date for the given org.
    csm_host_enterprise_gcp_host_count_top99p (int64): Shows the 99th percentile of all Cloud Security Management Enterprise GCP hosts over all hours in the current date for the given org.
    csm_host_enterprise_total_host_count_top99p (int64): Shows the 99th percentile of all Cloud Security Management Enterprise hosts over all hours in the current date for the given org.
    cspm_aas_host_top99p (int64): Shows the 99th percentile of all Cloud Security Management Pro Azure app services hosts over all hours in the current date for the given org.
    cspm_aws_host_top99p (int64): Shows the 99th percentile of all Cloud Security Management Pro AWS hosts over all hours in the current date for the given org.
    cspm_azure_host_top99p (int64): Shows the 99th percentile of all Cloud Security Management Pro Azure hosts over all hours in the current date for the given org.
    cspm_container_avg (int64): Shows the average number of Cloud Security Management Pro containers over all hours in the current date for the given org.
    cspm_container_hwm (int64): Shows the high-water mark of Cloud Security Management Pro containers over all hours in the current date for the given org.
    cspm_gcp_host_top99p (int64): Shows the 99th percentile of all Cloud Security Management Pro GCP hosts over all hours in the current date for the given org.
    cspm_host_top99p (int64): Shows the 99th percentile of all Cloud Security Management Pro hosts over all hours in the current date for the given org.
    custom_historical_ts_avg (int64): Shows the average number of distinct historical custom metrics over all hours in the current date for the given org.
    custom_live_ts_avg (int64): Shows the average number of distinct live custom metrics over all hours in the current date for the given org.
    custom_ts_avg (int64): Shows the average number of distinct custom metrics over all hours in the current date for the given org.
    cws_container_count_avg (int64): Shows the average of all distinct Cloud Workload Security containers over all hours in the current date for the given org.
    cws_fargate_task_avg (int64): Shows the average of all distinct Cloud Workload Security Fargate tasks over all hours in the current date for the given org.
    cws_host_top99p (int64): Shows the 99th percentile of all Cloud Workload Security hosts over all hours in the current date for the given org.
    data_jobs_monitoring_host_hr_sum (int64): Shows the sum of all Data Jobs Monitoring hosts over all hours in the current date for the given org.
    dbm_host_top99p_sum (int64): Shows the 99th percentile of all Database Monitoring hosts over all hours in the current month for the given org.
    dbm_queries_avg_sum (int64): Shows the average of all distinct Database Monitoring normalized queries over all hours in the current month for the given org.
    eph_infra_host_agent_sum (int64): Shows the sum of all ephemeral infrastructure hosts with the Datadog Agent over all hours in the current date for the given org.
    eph_infra_host_alibaba_sum (int64): Shows the sum of all ephemeral infrastructure hosts on Alibaba over all hours in the current date for the given org.
    eph_infra_host_aws_sum (int64): Shows the sum of all ephemeral infrastructure hosts on AWS over all hours in the current date for the given org.
    eph_infra_host_azure_sum (int64): Shows the sum of all ephemeral infrastructure hosts on Azure over all hours in the current date for the given org.
    eph_infra_host_ent_sum (int64): Shows the sum of all ephemeral infrastructure hosts for Enterprise over all hours in the current date for the given org.
    eph_infra_host_gcp_sum (int64): Shows the sum of all ephemeral infrastructure hosts on GCP over all hours in the current date for the given org.
    eph_infra_host_heroku_sum (int64): Shows the sum of all ephemeral infrastructure hosts on Heroku over all hours in the current date for the given org.
    eph_infra_host_only_aas_sum (int64): Shows the sum of all ephemeral infrastructure hosts with only Azure App Services over all hours in the current date for the given org.
    eph_infra_host_only_vsphere_sum (int64): Shows the sum of all ephemeral infrastructure hosts with only vSphere over all hours in the current date for the given org.
    eph_infra_host_opentelemetry_apm_sum (int64): Shows the sum of all ephemeral APM hosts reported by the Datadog exporter for the OpenTelemetry Collector over all hours in the current date for the given org.
    eph_infra_host_opentelemetry_sum (int64): Shows the sum of all ephemeral hosts reported by the Datadog exporter for the OpenTelemetry Collector over all hours in the current date for the given org.
    eph_infra_host_pro_sum (int64): Shows the sum of all ephemeral infrastructure hosts for Pro over all hours in the current date for the given org.
    eph_infra_host_proplus_sum (int64): Shows the sum of all ephemeral infrastructure hosts for Pro Plus over all hours in the current date for the given org.
    error_tracking_error_events_sum (int64): Shows the sum of all Error Tracking error events over all hours in the current date for the given org.
    error_tracking_events_sum (int64): Shows the sum of all Error Tracking events over all hours in the current date for the given org.
    error_tracking_rum_error_events_sum (int64): Shows the sum of all Error Tracking RUM error events over all hours in the current date for the given org.
    fargate_container_profiler_profiling_fargate_avg (int64): The average number of Profiling Fargate tasks over all hours in the current month for the given org.
    fargate_container_profiler_profiling_fargate_eks_avg (int64): The average number of Profiling Fargate Elastic Kubernetes Service tasks over all hours in the current month for the given org.
    fargate_tasks_count_avg (int64): The average task count for Fargate.
    fargate_tasks_count_hwm (int64): Shows the high-water mark of all Fargate tasks over all hours in the current date for the given org.
    flex_logs_compute_large_avg (int64): Shows the average number of Flex Logs Compute Large Instances over all hours in the current date for the given org.
    flex_logs_compute_medium_avg (int64): Shows the average number of Flex Logs Compute Medium Instances over all hours in the current date for the given org.
    flex_logs_compute_small_avg (int64): Shows the average number of Flex Logs Compute Small Instances over all hours in the current date for the given org.
    flex_logs_compute_xsmall_avg (int64): Shows the average number of Flex Logs Compute Extra Small Instances over all hours in the current date for the given org.
    flex_logs_starter_avg (int64): Shows the average number of Flex Logs Starter Instances over all hours in the current date for the given org.
    flex_logs_starter_storage_index_avg (int64): Shows the average number of Flex Logs Starter Storage Index Instances over all hours in the current date for the given org.
    flex_logs_starter_storage_retention_adjustment_avg (int64): Shows the average number of Flex Logs Starter Storage Retention Adjustment Instances over all hours in the current date for the given org.
    flex_stored_logs_avg (int64): Shows the average of all Flex Stored Logs over all hours in the current date for the given org.
    forwarding_events_bytes_sum (int64): Shows the sum of all log bytes forwarded over all hours in the current date for the given org.
    gcp_host_top99p (int64): Shows the 99th percentile of all GCP hosts over all hours in the current date for the given org.
    heroku_host_top99p (int64): Shows the 99th percentile of all Heroku dynos over all hours in the current date for the given org.
    id (string): The organization id.
    incident_management_monthly_active_users_hwm (int64): Shows the high-water mark of incident management monthly active users over all hours in the current date for the given org.
    indexed_events_count_sum (int64): DEPRECATED: Shows the sum of all log events indexed over all hours in the current date for the given org (To be deprecated on October 1st, 2024).
    infra_host_top99p (int64): Shows the 99th percentile of all distinct infrastructure hosts over all hours in the current date for the given org.
    ingested_events_bytes_sum (int64): Shows the sum of all log bytes ingested over all hours in the current date for the given org.
    iot_device_agg_sum (int64): Shows the sum of all IoT devices over all hours in the current date for the given org.
    iot_device_top99p_sum (int64): Shows the 99th percentile of all IoT devices over all hours in the current date for the given org.
    mobile_rum_lite_session_count_sum (int64): DEPRECATED: Shows the sum of all mobile lite sessions over all hours in the current date for the given org (To be deprecated on October 1st, 2024).
    mobile_rum_session_count_android_sum (int64): DEPRECATED: Shows the sum of all mobile RUM sessions on Android over all hours in the current date for the given org (To be deprecated on October 1st, 2024).
    mobile_rum_session_count_flutter_sum (int64): DEPRECATED: Shows the sum of all mobile RUM sessions on Flutter over all hours in the current date for the given org (To be deprecated on October 1st, 2024).
    mobile_rum_session_count_ios_sum (int64): DEPRECATED: Shows the sum of all mobile RUM sessions on iOS over all hours in the current date for the given org (To be deprecated on October 1st, 2024).
    mobile_rum_session_count_reactnative_sum (int64): DEPRECATED: Shows the sum of all mobile RUM sessions on React Native over all hours in the current date for the given org (To be deprecated on October 1st, 2024).
    mobile_rum_session_count_roku_sum (int64): DEPRECATED: Shows the sum of all mobile RUM sessions on Roku over all hours in the current date for the given org (To be deprecated on October 1st, 2024).
    mobile_rum_session_count_sum (int64): DEPRECATED: Shows the sum of all mobile RUM sessions over all hours in the current date for the given org (To be deprecated on October 1st, 2024).
    mobile_rum_units_sum (int64): DEPRECATED: Shows the sum of all mobile RUM units over all hours in the current date for the given org (To be deprecated on October 1st, 2024).
    name (string): The organization name.
    ndm_netflow_events_sum (int64): Shows the sum of all Network Device Monitoring NetFlow events over all hours in the current date for the given org.
    netflow_indexed_events_count_sum (int64): DEPRECATED: Shows the sum of all Network flows indexed over all hours in the current date for the given org (To be deprecated on October 1st, 2024).
    npm_host_top99p (int64): Shows the 99th percentile of all distinct Cloud Network Monitoring hosts (formerly known as Network hosts) over all hours in the current date for the given org.
    observability_pipelines_bytes_processed_sum (int64): Sum of all observability pipelines bytes processed over all hours in the current date for the given org.
    oci_host_sum (int64): Shows the sum of all Oracle Cloud Infrastructure hosts over all hours in the current date for the given org.
    oci_host_top99p (int64): Shows the 99th percentile of all Oracle Cloud Infrastructure hosts over all hours in the current date for the given org.
    online_archive_events_count_sum (int64): Sum of all online archived events over all hours in the current date for the given org.
    opentelemetry_apm_host_top99p (int64): Shows the 99th percentile of APM hosts reported by the Datadog exporter for the OpenTelemetry Collector over all hours in the current date for the given org.
    opentelemetry_host_top99p (int64): Shows the 99th percentile of all hosts reported by the Datadog exporter for the OpenTelemetry Collector over all hours in the current date for the given org.
    profiling_aas_count_top99p (int64): Shows the 99th percentile of all profiled Azure app services over all hours in the current date for all organizations.
    profiling_host_top99p (int64): Shows the 99th percentile of all profiled hosts over all hours within the current date for the given org.
    public_id (string): The organization public id.
    region (string): The region of the organization.
    rum_browser_and_mobile_session_count (int64): Shows the sum of all mobile sessions and all browser lite and legacy sessions over all hours in the current date for the given org (To be deprecated on October 1st, 2024).
    rum_browser_legacy_session_count_sum (int64): Shows the sum of all browser RUM legacy sessions over all hours in the current date for the given org (To be introduced on October 1st, 2024).
    rum_browser_lite_session_count_sum (int64): Shows the sum of all browser RUM lite sessions over all hours in the current date for the given org (To be introduced on October 1st, 2024).
    rum_browser_replay_session_count_sum (int64): Shows the sum of all browser RUM Session Replay counts over all hours in the current date for the given org (To be introduced on October 1st, 2024).
    rum_lite_session_count_sum (int64): Shows the sum of all RUM lite sessions (browser and mobile) over all hours in the current date for the given org (To be introduced on October 1st, 2024).
    rum_mobile_legacy_session_count_android_sum (int64): Shows the sum of all mobile RUM legacy sessions on Android over all hours in the current date for the given org (To be introduced on October 1st, 2024).
    rum_mobile_legacy_session_count_flutter_sum (int64): Shows the sum of all mobile RUM legacy sessions on Flutter over all hours in the current date for the given org (To be introduced on October 1st, 2024).
    rum_mobile_legacy_session_count_ios_sum (int64): Shows the sum of all mobile RUM legacy sessions on iOS over all hours in the current date for the given org (To be introduced on October 1st, 2024).
    rum_mobile_legacy_session_count_reactnative_sum (int64): Shows the sum of all mobile RUM legacy sessions on React Native over all hours in the current date for the given org (To be introduced on October 1st, 2024).
    rum_mobile_legacy_session_count_roku_sum (int64): Shows the sum of all mobile RUM legacy sessions on Roku over all hours in the current date for the given org (To be introduced on October 1st, 2024).
    rum_mobile_lite_session_count_android_sum (int64): Shows the sum of all mobile RUM lite sessions on Android over all hours in the current date for the given org (To be introduced on October 1st, 2024).
    rum_mobile_lite_session_count_flutter_sum (int64): Shows the sum of all mobile RUM lite sessions on Flutter over all hours in the current date for the given org (To be introduced on October 1st, 2024).
    rum_mobile_lite_session_count_ios_sum (int64): Shows the sum of all mobile RUM lite sessions on iOS over all hours in the current date for the given org (To be introduced on October 1st, 2024).
    rum_mobile_lite_session_count_reactnative_sum (int64): Shows the sum of all mobile RUM lite sessions on React Native over all hours in the current date for the given org (To be introduced on October 1st, 2024).
    rum_mobile_lite_session_count_roku_sum (int64): Shows the sum of all mobile RUM lite sessions on Roku over all hours in the current date for the given org (To be introduced on October 1st, 2024).
    rum_mobile_replay_session_count_android_sum (int64): Shows the sum of all mobile RUM replay sessions on Android over all hours within the current date for the given org.
    rum_mobile_replay_session_count_ios_sum (int64): Shows the sum of all mobile RUM replay sessions on iOS over all hours within the current date for the given org.
    rum_mobile_replay_session_count_reactnative_sum (int64): Shows the sum of all mobile RUM replay sessions on React Native over all hours within the current date for the given org.
    rum_replay_session_count_sum (int64): Shows the sum of all RUM Session Replay counts over all hours in the current date for the given org (To be introduced on October 1st, 2024).
    rum_session_count_sum (int64): DEPRECATED: Shows the sum of all browser RUM lite sessions over all hours in the current date for the given org (To be deprecated on October 1st, 2024).
    rum_total_session_count_sum (int64): Shows the sum of RUM sessions (browser and mobile) over all hours in the current date for the given org.
    rum_units_sum (int64): DEPRECATED: Shows the sum of all browser and mobile RUM units over all hours in the current date for the given org (To be deprecated on October 1st, 2024).
    sca_fargate_count_avg (int64): Shows the average of all Software Composition Analysis Fargate tasks over all hours in the current date for the given org.
    sca_fargate_count_hwm (int64): Shows the sum of the high-water marks of all Software Composition Analysis Fargate tasks over all hours in the current date for the given org.
    sds_apm_scanned_bytes_sum (int64): Sum of all APM bytes scanned with sensitive data scanner over all hours in the current date for the given org.
    sds_events_scanned_bytes_sum (int64): Sum of all event stream events bytes scanned with sensitive data scanner over all hours in the current date for the given org.
    sds_logs_scanned_bytes_sum (int64): Shows the sum of all bytes scanned of logs usage by the Sensitive Data Scanner over all hours in the current month for the given org.
    sds_rum_scanned_bytes_sum (int64): Sum of all RUM bytes scanned with sensitive data scanner over all hours in the current date for the given org.
    sds_total_scanned_bytes_sum (int64): Shows the sum of all bytes scanned across all usage types by the Sensitive Data Scanner over all hours in the current month for the given org.
    serverless_apps_azure_count_avg (int64): Shows the average of the number of Serverless Apps for Azure for the given date and given org.
    serverless_apps_google_count_avg (int64): Shows the average of the number of Serverless Apps for Google Cloud for the given date and given org.
    serverless_apps_total_count_avg (int64): Shows the average of the number of Serverless Apps for Azure and Google Cloud for the given date and given org.
    siem_analyzed_logs_add_on_count_sum (int64): Shows the sum of all log events analyzed by Cloud SIEM over all hours in the current date for the given org.
    synthetics_browser_check_calls_count_sum (int64): Shows the sum of all Synthetic browser tests over all hours in the current date for the given org.
    synthetics_check_calls_count_sum (int64): Shows the sum of all Synthetic API tests over all hours in the current date for the given org.
    synthetics_mobile_test_runs_sum (int64): Shows the sum of all Synthetic mobile application tests over all hours in the current date for the given org.
    synthetics_parallel_testing_max_slots_hwm (int64): Shows the high-water mark of used synthetics parallel testing slots over all hours in the current date for the given org.
    trace_search_indexed_events_count_sum (int64): Shows the sum of all Indexed Spans indexed over all hours in the current date for the given org.
    twol_ingested_events_bytes_sum (int64): Shows the sum of all ingested APM span bytes over all hours in the current date for the given org.
    universal_service_monitoring_host_top99p (int64): Shows the 99th percentile of all Universal Service Monitoring hosts over all hours in the current date for the given org.
    vsphere_host_top99p (int64): Shows the 99th percentile of all vSphere hosts over all hours in the current date for the given org.
    vuln_management_host_count_top99p (int64): Shows the 99th percentile of all Application Vulnerability Management hosts over all hours in the current date for the given org.
    workflow_executions_usage_sum (int64): Sum of all workflows executed over all hours in the current date for the given org.

profiling_aas_count_top99p

\n
\n

int64

\n

Shows the 99th percentile of all profiled Azure app services over all hours in the current date for all organizations.

\n
\n \n
\n
\n
\n
\n
\n

profiling_host_top99p

\n
\n

int64

\n

Shows the 99th percentile of all profiled hosts over all hours within the current date for all organizations.

\n
\n \n
\n
\n
\n
\n
\n

rum_browser_and_mobile_session_count

\n
\n

int64

\n

Shows the sum of all mobile sessions and all browser lite and legacy sessions over all hours in the current month for all organizations (To be deprecated on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

rum_browser_legacy_session_count_sum

\n
\n

int64

\n

Shows the sum of all browser RUM legacy sessions over all hours in the current date for all organizations (To be introduced on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

rum_browser_lite_session_count_sum

\n
\n

int64

\n

Shows the sum of all browser RUM lite sessions over all hours in the current date for all organizations (To be introduced on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

rum_browser_replay_session_count_sum

\n
\n

int64

\n

Shows the sum of all browser RUM Session Replay counts over all hours in the current date for all organizations (To be introduced on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

rum_lite_session_count_sum

\n
\n

int64

\n

Shows the sum of all RUM lite sessions (browser and mobile) over all hours in the current date for all organizations (To be introduced on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

rum_mobile_legacy_session_count_android_sum

\n
\n

int64

\n

Shows the sum of all mobile RUM legacy sessions on Android over all hours in the current date for all organizations (To be introduced on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

rum_mobile_legacy_session_count_flutter_sum

\n
\n

int64

\n

Shows the sum of all mobile RUM legacy Sessions on Flutter over all hours in the current date for all organizations (To be introduced on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

rum_mobile_legacy_session_count_ios_sum

\n
\n

int64

\n

Shows the sum of all mobile RUM legacy sessions on iOS over all hours in the current date for all organizations (To be introduced on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

rum_mobile_legacy_session_count_reactnative_sum

\n
\n

int64

\n

Shows the sum of all mobile RUM legacy sessions on React Native over all hours in the current date for all organizations (To be introduced on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

rum_mobile_legacy_session_count_roku_sum

\n
\n

int64

\n

Shows the sum of all mobile RUM legacy sessions on Roku over all hours in the current date for all organizations (To be introduced on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

rum_mobile_lite_session_count_android_sum

\n
\n

int64

\n

Shows the sum of all mobile RUM lite sessions on Android over all hours in the current date for all organizations (To be introduced on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

rum_mobile_lite_session_count_flutter_sum

\n
\n

int64

\n

Shows the sum of all mobile RUM lite sessions on Flutter over all hours in the current date for all organizations (To be introduced on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

rum_mobile_lite_session_count_ios_sum

\n
\n

int64

\n

Shows the sum of all mobile RUM lite sessions on iOS over all hours in the current date for all organizations (To be introduced on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

rum_mobile_lite_session_count_reactnative_sum

\n
\n

int64

\n

Shows the sum of all mobile RUM lite sessions on React Native over all hours in the current date for all organizations (To be introduced on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

rum_mobile_lite_session_count_roku_sum

\n
\n

int64

\n

Shows the sum of all mobile RUM lite sessions on Roku over all hours within the current date for all organizations (To be introduced on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

rum_mobile_replay_session_count_android_sum

\n
\n

int64

\n

Shows the sum of all mobile RUM replay sessions on Android over all hours within the current date for the given org.

\n
\n \n
\n
\n
\n
\n
\n

rum_mobile_replay_session_count_ios_sum

\n
\n

int64

\n

Shows the sum of all mobile RUM replay sessions on iOS over all hours within the current date for the given org.

\n
\n \n
\n
\n
\n
\n
\n

rum_mobile_replay_session_count_reactnative_sum

\n
\n

int64

\n

Shows the sum of all mobile RUM replay sessions on React Native over all hours within the current date for the given org.

\n
\n \n
\n
\n
\n
\n
\n

rum_replay_session_count_sum

\n
\n

int64

\n

Shows the sum of all RUM Session Replay counts over all hours in the current date for all organizations (To be introduced on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

rum_session_count_sum

\n
\n

int64

\n

DEPRECATED: Shows the sum of all browser RUM lite sessions over all hours in the current date for all organizations (To be deprecated on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

rum_total_session_count_sum

\n
\n

int64

\n

Shows the sum of RUM sessions (browser and mobile) over all hours in the current date for all organizations.

\n
\n \n
\n
\n
\n
\n
\n

rum_units_sum

\n
\n

int64

\n

DEPRECATED: Shows the sum of all browser and mobile RUM units over all hours in the current date for all organizations (To be deprecated on October 1st, 2024).

\n
\n \n
\n
\n
\n
\n
\n

sca_fargate_count_avg

\n
\n

int64

\n

Shows the average of all Software Composition Analysis Fargate tasks over all hours in the current date for the given org.

\n
\n \n
\n
\n
\n
\n
\n

sca_fargate_count_hwm

\n
\n

int64

\n

Shows the sum of the high-water marks of all Software Composition Analysis Fargate tasks over all hours in the current date for the given org.

\n
\n \n
\n
\n
\n
\n
\n

sds_apm_scanned_bytes_sum

\n
\n

int64

\n

Sum of all APM bytes scanned with sensitive data scanner over all hours in the current date for all organizations.

\n
\n \n
\n
\n
\n
\n
\n

sds_events_scanned_bytes_sum

\n
\n

int64

\n

Sum of all event stream events bytes scanned with sensitive data scanner over all hours in the current date for all organizations.

\n
\n \n
\n
\n
\n
\n
\n

sds_logs_scanned_bytes_sum

\n
\n

int64

\n

Shows the sum of all bytes scanned of logs usage by the Sensitive Data Scanner over all hours in the current month for all organizations.

\n
\n \n
\n
\n
\n
\n
\n

sds_rum_scanned_bytes_sum

\n
\n

int64

\n

Sum of all RUM bytes scanned with sensitive data scanner over all hours in the current date for all organizations.

\n
\n \n
\n
\n
\n
\n
\n

sds_total_scanned_bytes_sum

\n
\n

int64

\n

Shows the sum of all bytes scanned across all usage types by the Sensitive Data Scanner over all hours in the current month for all organizations.

\n
\n \n
\n
\n
\n
\n
\n

serverless_apps_azure_count_avg

\n
\n

int64

\n

Shows the average of the number of Serverless Apps for Azure for the given date and given org.

\n
\n \n
\n
\n
\n
\n
\n

serverless_apps_google_count_avg

\n
\n

int64

\n

Shows the average of the number of Serverless Apps for Google Cloud for the given date and given org.

\n
\n \n
\n
\n
\n
\n
\n

serverless_apps_total_count_avg

\n
\n

int64

\n

Shows the average of the number of Serverless Apps for Azure and Google Cloud for the given date and given org.

\n
\n \n
\n
\n
\n
\n
\n

siem_analyzed_logs_add_on_count_sum

\n
\n

int64

\n

Shows the sum of all log events analyzed by Cloud SIEM over all hours in the current date for the given org.

\n
\n \n
\n
\n
\n
\n
\n

synthetics_browser_check_calls_count_sum

\n
\n

int64

\n

Shows the sum of all Synthetic browser tests over all hours in the current date for all organizations.

\n
\n \n
\n
\n
\n
\n
\n

synthetics_check_calls_count_sum

\n
\n

int64

\n

Shows the sum of all Synthetic API tests over all hours in the current date for all organizations.

\n
\n \n
\n
\n
\n
\n
\n

synthetics_mobile_test_runs_sum

\n
\n

int64

\n

Shows the sum of all Synthetic mobile application tests over all hours in the current date for all organizations.

\n
\n \n
\n
\n
\n
\n
\n

synthetics_parallel_testing_max_slots_hwm

\n
\n

int64

\n

Shows the high-water mark of used synthetics parallel testing slots over all hours in the current date for all organizations.

\n
\n \n
\n
\n
\n
\n
\n

trace_search_indexed_events_count_sum

\n
\n

int64

\n

Shows the sum of all Indexed Spans indexed over all hours in the current date for all organizations.

\n
\n \n
\n
\n
\n
\n
\n

twol_ingested_events_bytes_sum

\n
\n

int64

\n

Shows the sum of all ingested APM span bytes over all hours in the current date for all organizations.

\n
\n \n
\n
\n
\n
\n
\n

universal_service_monitoring_host_top99p

\n
\n

int64

\n

Shows the 99th percentile of all universal service management hosts over all hours in the current date for the given org.

\n
\n \n
\n
\n
\n
\n
\n

vsphere_host_top99p

\n
\n

int64

\n

Shows the 99th percentile of all vSphere hosts over all hours in the current date for all organizations.

\n
\n \n
\n
\n
\n
\n
\n

vuln_management_host_count_top99p

\n
\n

int64

\n

Shows the 99th percentile of all Application Vulnerability Management hosts over all hours in the current date for the given org.

\n
\n \n
\n
\n
\n
\n
\n

workflow_executions_usage_sum

\n
\n

int64

\n

Sum of all workflows executed over all hours in the current date for all organizations.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

vsphere_host_top99p_sum

\n
\n

int64

\n

Shows the 99th percentile of all vSphere hosts over all hours in the current month for all organizations.

\n
\n \n
\n
\n
\n
\n
\n

vuln_management_host_count_top99p_sum

\n
\n

int64

\n

Shows the 99th percentile of all Application Vulnerability Management hosts over all hours in the current month for all organizations.

\n
\n \n
\n
\n
\n
\n
\n

workflow_executions_usage_agg_sum

\n
\n

int64

\n

Sum of all workflows executed over all hours in the current month for all organizations.

\n
\n \n
\n
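All of the counters above are plain `int64` attributes, so they can be read directly off a parsed usage response. The following is a minimal sketch, assuming `usage` is a single already-parsed usage object whose keys match the attribute names documented above; how the response is fetched is not shown, and the sample values are hypothetical.

```python
# Minimal illustrative sketch (assumption: `usage` is a dict parsed from a
# usage response whose keys match the attribute names documented above).
def summarize_usage(usage: dict) -> dict:
    """Pull a few of the documented int64 counters, defaulting missing ones to 0."""
    fields = [
        "rum_total_session_count_sum",
        "sds_total_scanned_bytes_sum",
        "synthetics_check_calls_count_sum",
        "workflow_executions_usage_sum",
    ]
    return {field: int(usage.get(field) or 0) for field in fields}


if __name__ == "__main__":
    # Hypothetical values, for illustration only.
    example = {"rum_total_session_count_sum": 1200, "workflow_executions_usage_sum": 42}
    print(summarize_usage(example))
```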
" }, "400": { "json": { diff --git a/content/en/api/v2/agentless-scanning/_index.md b/content/en/api/v2/agentless-scanning/_index.md new file mode 100644 index 0000000000000..ec1b1b73b89fe --- /dev/null +++ b/content/en/api/v2/agentless-scanning/_index.md @@ -0,0 +1,4 @@ +--- +title: Agentless Scanning +headless: true +--- diff --git a/content/en/api/v2/agentless-scanning/examples.json b/content/en/api/v2/agentless-scanning/examples.json new file mode 100644 index 0000000000000..17ee84caa423a --- /dev/null +++ b/content/en/api/v2/agentless-scanning/examples.json @@ -0,0 +1,44 @@ +{ + "ListAwsScanOptions": { + "responses": { + "200": { + "json": { + "data": [ + { + "attributes": { + "lambda": true, + "sensitive_data": false, + "vuln_containers_os": true, + "vuln_host_os": true + }, + "id": "184366314700", + "type": "aws_scan_options" + } + ] + }, + "html": "
| Field | Type | Description |
|-------|------|-------------|
| data | [object] | A list of AWS scan options. |
| attributes | object | Attributes for the AWS scan options. |
| lambda | boolean | Indicates if scanning of Lambda functions is enabled. |
| sensitive_data | boolean | Indicates if scanning for sensitive data is enabled. |
| vuln_containers_os | boolean | Indicates if scanning for vulnerabilities in containers is enabled. |
| vuln_host_os | boolean | Indicates if scanning for vulnerabilities in hosts is enabled. |
| id | string | The ID of the AWS account. |
| type | enum | The type of the resource. The value should always be aws_scan_options. Allowed enum values: aws_scan_options. default: aws_scan_options |
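The example `200` payload earlier in this file returns one `aws_scan_options` object per AWS account with exactly these attributes. A minimal sketch of flattening such a payload into per-account settings follows; the literal mirrors that example, and fetching the payload from the API is intentionally not shown.

```python
# Minimal illustrative sketch: flatten a ListAwsScanOptions-style 200 payload
# into per-account scan settings. The literal mirrors the example response in
# this file; calling the API itself is out of scope here.
payload = {
    "data": [
        {
            "attributes": {
                "lambda": True,
                "sensitive_data": False,
                "vuln_containers_os": True,
                "vuln_host_os": True,
            },
            "id": "184366314700",
            "type": "aws_scan_options",
        }
    ]
}

for account in payload["data"]:
    attributes = account.get("attributes", {})
    enabled = [name for name, is_on in attributes.items() if is_on]
    print(f"AWS account {account['id']}: {', '.join(enabled) or 'no scan options enabled'}")
```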
" + }, + "403": { + "json": { + "errors": [ + "Bad Request" + ] + }, + "html": "
| Field | Type | Description |
|-------|------|-------------|
| errors [required] | [string] | A list of errors. |
" + }, + "429": { + "json": { + "errors": [ + "Bad Request" + ] + }, + "html": "
| Field | Type | Description |
|-------|------|-------------|
| errors [required] | [string] | A list of errors. |
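The `403` and `429` responses share the same `errors` array shape, so a client can report them uniformly and treat `429` as retryable. A generic sketch, assuming `status` and `body` come from an HTTP call made elsewhere:

```python
# Minimal illustrative sketch: uniform handling of the documented error shape.
# `status` and `body` are assumed to come from an HTTP client call (not shown);
# 429 is treated as retryable, other error statuses raise.
import time


def handle_response(status: int, body: dict, attempt: int = 0) -> bool:
    """Return True if the caller should retry the request."""
    if status == 200:
        return False
    errors = body.get("errors", [])
    if status == 429:
        time.sleep(min(2 ** attempt, 30))  # simple capped exponential backoff
        return True
    raise RuntimeError(f"request failed ({status}): {'; '.join(errors)}")
```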
" + } + }, + "request": { + "json_curl": {}, + "json": {}, + "html": "" + } + } +} \ No newline at end of file diff --git a/content/en/cloud_cost_management/azure.md b/content/en/cloud_cost_management/azure.md index 5070f272f227a..f0467cdce6d4e 100644 --- a/content/en/cloud_cost_management/azure.md +++ b/content/en/cloud_cost_management/azure.md @@ -147,6 +147,8 @@ If your exports are in different storage containers, repeat steps one to seven f This ensures complete cost accuracy by allowing periodic cost calculations against Microsoft Cost Management. +**Note**: Data can take up to 48 to 72 hours after setup to stabilize in Datadog. + [1]: https://portal.azure.com/#view/Microsoft_Azure_Billing/SubscriptionsBlade {{% /tab %}} diff --git a/content/en/cloud_cost_management/google_cloud.md b/content/en/cloud_cost_management/google_cloud.md index 4abf8ea1faefa..365dbd9a1b51b 100644 --- a/content/en/cloud_cost_management/google_cloud.md +++ b/content/en/cloud_cost_management/google_cloud.md @@ -128,6 +128,8 @@ If your integrated Service Account exists in a different Google Cloud Platform p ### Configure Cloud Costs Continue to follow the steps indicated in [Setup & Configuration][3]. +**Note**: Data can take up to 48 to 72 hours after setup to stabilize in Datadog. + ## Cost types You can visualize your ingested data using the following cost types: diff --git a/content/en/cloud_cost_management/multisource_querying/_index.md b/content/en/cloud_cost_management/multisource_querying/_index.md index 089cb7ed4c68b..427aec2bcdb31 100644 --- a/content/en/cloud_cost_management/multisource_querying/_index.md +++ b/content/en/cloud_cost_management/multisource_querying/_index.md @@ -3,6 +3,9 @@ title: Multisource Querying is_beta: true description: Learn how to use Multisource Querying to query costs across multiple providers in Cloud Cost Management. further_reading: +- link: "https://www.datadoghq.com/blog/focus-cost-data/" + tag: "Blog" + text: "Monitor your multi-cloud costs with Cloud Cost Management and FOCUS" - link: "/cloud_cost_management/" tag: "Documentation" text: "Learn about Cloud Cost Management" diff --git a/content/en/cloudcraft/faq/account-data-storage.md b/content/en/cloudcraft/faq/account-data-storage.md index 7560e2849e5d5..84f00c86e630f 100644 --- a/content/en/cloudcraft/faq/account-data-storage.md +++ b/content/en/cloudcraft/faq/account-data-storage.md @@ -6,7 +6,7 @@ Cloudcraft stores data in the AWS `us-east-1` (North Virginia) region for public All data is encrypted at rest and in transit using industry best practices. -Cloudcraft also implements rigorous security processes. For more information, see the [Cloudcraft security page][1] and [SOC 2 Type I report][2]. +Cloudcraft also implements rigorous security processes. For more information, see the [Cloudcraft security page][1] and [SOC 2 Type II report][2]. If you are interested in storing your data in Europe, contact [the sales team][3] for more information. 
diff --git a/content/en/containers/amazon_ecs/apm.md b/content/en/containers/amazon_ecs/apm.md index 8e26b7668336b..dfe2e733fb117 100644 --- a/content/en/containers/amazon_ecs/apm.md +++ b/content/en/containers/amazon_ecs/apm.md @@ -200,7 +200,7 @@ end {{< /programming-lang >}} -{{< programming-lang lang="go" >}} +{{< programming-lang lang="go">}} #### Launch time variable Update the Task Definition's `entryPoint` with the following, substituting your ``: @@ -222,7 +222,8 @@ package main import ( "net/http" "io/ioutil" - "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" // 1.x + // "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" // 2.x ) func main() { diff --git a/content/en/containers/docker/apm.md b/content/en/containers/docker/apm.md index 4a9cfd44c8ddd..4c3935b346f51 100644 --- a/content/en/containers/docker/apm.md +++ b/content/en/containers/docker/apm.md @@ -280,7 +280,10 @@ end ```go package main -import "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" +import ( + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" // 1.x + // "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" // 2.x +) func main() { tracer.Start(tracer.WithAgentAddr("datadog-agent:8126")) diff --git a/content/en/continuous_integration/pipelines/custom.md b/content/en/continuous_integration/pipelines/custom.md index bbf10de98083c..2b33ea5ef8b5a 100644 --- a/content/en/continuous_integration/pipelines/custom.md +++ b/content/en/continuous_integration/pipelines/custom.md @@ -69,6 +69,7 @@ To send pipeline events programmatically to Datadog, ensure that your [`DD_API_K { "data": { "attributes": { + "provider_name": "", "resource": { "level": "pipeline", "unique_id": "b3262537-a573-44eb-b777-4c0f37912b05", diff --git a/content/en/continuous_integration/pipelines/github.md b/content/en/continuous_integration/pipelines/github.md index ba7eff01d21ef..742db8a94a107 100644 --- a/content/en/continuous_integration/pipelines/github.md +++ b/content/en/continuous_integration/pipelines/github.md @@ -1,5 +1,5 @@ --- -title: Set up Tracing on GitHub Actions Workflows +title: Set up CI Visibility on GitHub Actions Workflows aliases: - /continuous_integration/setup_pipelines/github further_reading: @@ -25,7 +25,7 @@ further_reading: [GitHub Actions][1] is an automation tool that allows you to build, test, and deploy your code in GitHub. Create workflows that automate every step of your development process, streamlining software updates and enhancing code quality with CI/CD features integrated into your repositories. -Set up tracing in GitHub Actions to track the execution of your workflows, identify performance bottlenecks, troubleshoot operational issues, and optimize your deployment processes. +Set up CI Visibility in GitHub Actions to track the execution of your workflows, identify performance bottlenecks, troubleshoot operational issues, and optimize your deployment processes. ### Compatibility @@ -61,7 +61,7 @@ The [GitHub Actions][1] integration uses a private [GitHub App][11] to collect w 6. Give the app a name, for example, `Datadog CI Visibility`. 7. Click **Install GitHub App** and follow the instructions on GitHub. -### Configure tracing for GitHub Actions +### Configure CI Visibility for GitHub Actions After the GitHub App is created and installed, enable CI Visibility on the accounts and/or repositories you want visibility into. 
@@ -72,14 +72,13 @@ After the GitHub App is created and installed, enable CI Visibility on the accou Pipelines appear immediately after enabling CI Visibility for any account or repository. -### Disable GitHub Actions tracing +### Disable CI Visibility for GitHub Actions -To disable the CI Visibility GitHub Actions integration, make sure the GitHub app is no longer subscribed to the -workflow job and workflow run events. To remove the events: +To disable the CI Visibility GitHub Actions integration: -1. Go to the [GitHub Apps][14] page. -2. Click **Edit > Permission & events** on the relevant Datadog GitHub App (if you have multiple apps, you have to repeat the process for each). -3. Scroll to the **Subscribe to events** section, and make sure that **Workflow job** and **Workflow run** are not selected. +1. Go to the [CI GitHub Settings][14] page. +2. Choose the GitHub account that you want to disable CI Visibility for, and click **Account Enabled**. +3. Untoggle **Enable CI Visibility**, or choose which repository you want to disable it for individually. ### Collect job logs @@ -125,7 +124,7 @@ The **CI Pipeline List** page shows data for only the default branch of each rep [11]: https://docs.github.com/developers/apps/getting-started-with-apps/about-apps [12]: https://app.datadoghq.com/integrations/github/ [13]: https://app.datadoghq.com/ci/setup/pipeline?provider=github -[14]: https://github.com/settings/apps +[14]: https://app.datadoghq.com/ci/settings/provider [15]: /logs/ [16]: /logs/guide/best-practices-for-log-management/ [17]: https://app.datadoghq.com/ci/pipelines diff --git a/content/en/dashboards/guide/_index.md index 7ee76706225b2..60a0044802769 100644 --- a/content/en/dashboards/guide/_index.md +++ b/content/en/dashboards/guide/_index.md @@ -23,7 +23,7 @@ cascade: {{< nextlink href="dashboards/guide/context-links" >}}Context Links{{< /nextlink >}} {{< nextlink href="dashboards/guide/unit-override" >}}Unit override{{< /nextlink >}} {{< nextlink href="dashboards/guide/how-to-use-terraform-to-restrict-dashboards" >}}Using Terraform to restrict the editing of a dashboard{{< /nextlink >}} - {{< nextlink href="dashboards/guide/is-read-only-deprecation" >}}Dashboards API: Migrate from is_read_only by November 30, 2024{{< /nextlink >}} + {{< nextlink href="dashboards/guide/is-read-only-deprecation" >}}Dashboards API: Migrate from is_read_only by December 13, 2024{{< /nextlink >}} {{< nextlink href="dashboards/guide/embeddable-graphs-with-template-variables" >}}Embeddable graphs with template variables{{< /nextlink >}} {{< nextlink href="dashboards/guide/unable-to-iframe" >}}Why am I unable to iFrame certain HTTPS URLs?{{< /nextlink >}} {{< nextlink href="dashboards/guide/powerpacks-best-practices" >}}Powerpacks best practices{{< /nextlink >}} diff --git a/content/en/dashboards/sharing/scheduled_reports.md index d034c22e47f37..8c4147cd82cfd 100644 --- a/content/en/dashboards/sharing/scheduled_reports.md +++ b/content/en/dashboards/sharing/scheduled_reports.md @@ -31,34 +31,42 @@ The report PDF is included as an email attachment or as a link, depending on its Create a report from any [dashboard or timeboard][1] that has at least one [supported widget](#unsupported-widget-types). -Click the **Share** button at the top of your dashboard and select **Schedule a Report**. +Click the **Share** button at the top of your dashboard and select **Schedule report**. 
-{{< img src="dashboards/scheduled_reports/report_configuration_modal.png" alt="The configuration modal for an individual dashboard report, with sections to set a schedule, add recipients, and customize email. At the bottom of the modal are buttons to edit template variables, delete report, send preview, cancel, and save" style="width:90%;" >}} +### 1. Set a schedule -### Set a schedule +In the configuration modal that opens, set a schedule for the report to determine when and how often the report is sent. -In the configuration modal that opens, set a schedule for the report to determine when and how often the report is sent. Set a time frame to determine the range of time displayed in the resulting report. The report time frame can be different from the time frame displayed on the dashboard. +**{{< img src="dashboards/scheduled_reports/set_schedule.png" alt="Section for defining a report schedule. Includes a schedule preview table showing the next 5 scheduled report dates." style="width:90%;" >}}** -### Add recipients +### 2. Configure report -Add recipients to your report by entering their email addresses. The email associated with your Datadog account is automatically added as a recipient. You can remove yourself as a recipient by hovering over your email and clicking the **X** that appears next to it. +Define the report title and set a time frame to determine the range of time displayed in the resulting report. The report time frame can be different from the time frame displayed on the dashboard. -**Note:** Enterprise and Pro accounts can send reports to recipients outside of their organizations. +**Note:** Modifying the report time frame updates the **Schedule Preview** dropdown table above. + +**{{< img src="dashboards/scheduled_reports/configure_report.png" alt="Section for defining a report schedule" style="width:90%;" >}}** + +Click **Edit Variables** to modify the filters applied when the report is sent. These values do not affect the dashboard's default template variable values. -### Customize the report +**{{< img src="dashboards/scheduled_reports/edit_variables.png" alt="The configuration modal section for customizing the report title, timeframe and variables." style="width:90%;" >}}** -Finally, customize the report to provide recipients with more context or a tailored view. The optional description is included in the report email body. +### 3. Add recipients + +Add recipients to your report by entering their email addresses. The email associated with your Datadog account is automatically added as a recipient. You can remove yourself as a recipient by hovering over your email and clicking the trash icon that appears next to it. + +**Note:** Enterprise and Pro accounts can send reports to recipients outside of their organizations. -Click **Edit Template Variables** to modify the filters applied when the report is sent. These values do not affect the dashboard's default template variable values. +**{{< img src="dashboards/scheduled_reports/add_recipients.png" alt="The configuration modal for editing scheduled report variables." style="width:90%;" >}}** -To see the report before saving the schedule, click **Send Preview**. You can pause a report schedule at any time. +To see the report before saving the schedule, click **Send Test Email**. You can pause a report schedule at any time. ## Managing reports A single dashboard can have multiple scheduled reports with different settings, which allows you to inform different groups of stakeholders interested in the same dashboard. 
To see the reports on an existing dashboard, click the **Share** button and select **Configure Reports**. From the configuration modal that opens, you can pause an existing report or create a new report. To see and edit the details of an existing report, or delete the report, click **Edit**. -{{< img src="dashboards/scheduled_reports/scheduled_reports_configuration_modal.png" alt="The configuration modal for scheduled reports, with two reports displayed, each showing their titles, tags, recipients, frequency, an option to toggle the report on or off, and a button to edit the report. At the bottom is a button to add a new report and a done button" style="width:90%;" >}} +{{< img src="dashboards/scheduled_reports/manage_reports.png" alt="The configuration modal for scheduled reports, with two reports displayed, each showing their titles, tags, recipients, frequency, an option to toggle the report on or off, and a button to edit the report. At the bottom is a button to add a new report and a cancel button" style="width:90%;" >}} ## Permissions diff --git a/content/en/dashboards/widgets/geomap.md b/content/en/dashboards/widgets/geomap.md index ed40130648b3d..f5f1dbe77211b 100644 --- a/content/en/dashboards/widgets/geomap.md +++ b/content/en/dashboards/widgets/geomap.md @@ -71,6 +71,10 @@ The geomap widget visualizes geographic data with shaded regions or points. It c [Context links][7] are enabled by default, you can toggle them on or off. Context links connect dashboard widgets with other pages (in Datadog, or third-party). +#### Visual formatting rules + +Customize the region layer color of your Geomap widget with conditional rules. + ## API This widget can be used with the **[Dashboards API][8]**. See the following table for the [widget JSON schema definition][9]: diff --git a/content/en/data_streams/go.md b/content/en/data_streams/go.md index 36dab9810f419..90ba2747670dc 100644 --- a/content/en/data_streams/go.md +++ b/content/en/data_streams/go.md @@ -42,7 +42,8 @@ To manually instrument the Sarama Kafka client with Data Streams Monitoring: ```go import ( - ddsarama "gopkg.in/DataDog/dd-trace-go.v1/contrib/Shopify/sarama" + ddsarama "gopkg.in/DataDog/dd-trace-go.v1/contrib/Shopify/sarama" // 1.x + // ddsarama "github.com/DataDog/dd-trace-go/contrib/Shopify/sarama/v2" // 2.x ) 2. 
Wrap the producer with `ddsarama.WrapAsyncProducer` @@ -63,7 +64,8 @@ To manually instrument Confluent Kafka with Data Streams Monitoring: ```go import ( - ddkafka "gopkg.in/DataDog/dd-trace-go.v1/contrib/confluentinc/confluent-kafka-go/kafka.v2" + ddkafka "gopkg.in/DataDog/dd-trace-go.v1/contrib/confluentinc/confluent-kafka-go/kafka.v2" // 1.x + // ddkafka "github.com/DataDog/dd-trace-go/contrib/confluentinc/confluent-kafka-go/kafka.v2/v2" // 2.x ) ``` diff --git a/content/en/database_monitoring/connect_dbm_and_apm.md b/content/en/database_monitoring/connect_dbm_and_apm.md index 4883600a25f02..ed086e6520149 100644 --- a/content/en/database_monitoring/connect_dbm_and_apm.md +++ b/content/en/database_monitoring/connect_dbm_and_apm.md @@ -96,31 +96,34 @@ DD_VERSION=(application version) {{% tab "Go" %}} Update your app dependencies to include [dd-trace-go@v1.44.0][1] or greater: -``` -go get gopkg.in/DataDog/dd-trace-go.v1@v1.44.0 +```shell +go get gopkg.in/DataDog/dd-trace-go.v1@v1.44.0 # 1.x +# go get github.com/DataDog/dd-trace-go/v2 # 2.x ``` Update your code to import the `contrib/database/sql` package: ```go import ( "database/sql" - "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" - sqltrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql" + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" // 1.x + sqltrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql" // 1.x + // "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" // 2.x + // sqltrace "github.com/DataDog/dd-trace-go/contrib/database/sql/v2" // 2.x ) ``` Enable the database monitoring propagation feature using one of the following methods: -1. Env variable: +- Env variable: `DD_DBM_PROPAGATION_MODE=full` -2. Using code during the driver registration: +- Using code during the driver registration: ```go - sqltrace.Register("postgres", &pq.Driver{}, sqltrace.WithDBMPropagation(tracer.DBMPropagationModeFull), sqltrace.WithServiceName("my-db-service")) + sqltrace.Register("postgres", &pq.Driver{}, sqltrace.WithDBMPropagation(tracer.DBMPropagationModeFull), sqltrace.WithService("my-db-service")) ``` -3. Using code on `sqltrace.Open`: +- Using code on `sqltrace.Open`: ```go - sqltrace.Register("postgres", &pq.Driver{}, sqltrace.WithServiceName("my-db-service")) + sqltrace.Register("postgres", &pq.Driver{}, sqltrace.WithService("my-db-service")) db, err := sqltrace.Open("postgres", "postgres://pqgotest:password@localhost/pqgotest?sslmode=disable", sqltrace.WithDBMPropagation(tracer.DBMPropagationModeFull)) if err != nil { @@ -132,8 +135,10 @@ Full example: ```go import ( "database/sql" - "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" - sqltrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql" + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" // 1.x + sqltrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql" // 1.x + // "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" // 2.x + // sqltrace "github.com/DataDog/dd-trace-go/contrib/database/sql/v2" // 2.x ) func main() { @@ -196,14 +201,12 @@ public class Application { } ``` -**Note**: - -**Tracer versions 1.44 and greater**: +**Tracer versions 1.44 and above**: Enable the prepared statements tracing for Postgres using **one** of the following methods: - Set the system property `dd.dbm.trace_prepared_statements=true` - Set the environment variable `export DD_DBM_TRACE_PREPARED_STATEMENTS=true` -The prepared statements instrumentation will overwrite the `Application` property, and will cause an extra roundtrip to the database. 
+**Note**: The prepared statements instrumentation overwrites the `Application` property and causes an extra roundtrip to the database. This additional roundtrip has a negligible impact on latency. **Tracer versions below 1.44**: Prepared statements are not supported in `full` mode for Postgres and MySQL, and all JDBC API calls that use prepared statements are automatically downgraded to `service` mode. Since most Java SQL libraries use prepared statements by default, this means that **most** Java applications are only able to use `service` mode. diff --git a/content/en/developers/ide_plugins/visual_studio/_index.md b/content/en/developers/ide_plugins/visual_studio/_index.md index 6f22a5af7e743..9f2240df40179 100644 --- a/content/en/developers/ide_plugins/visual_studio/_index.md +++ b/content/en/developers/ide_plugins/visual_studio/_index.md @@ -81,7 +81,16 @@ When you start editing a source file supported by Static Analysis, the extension 4. Open a solution in Visual Studio. 5. Go to **Extensions > Datadog > Linked Services**. 6. Add services, and save your solution. -7. Go to **Extensions > Datadog > Code Insights**. +7. Go to **Tools > Options > Datadog** and enable **Code Insights**. +8. Sign in to Datadog by clicking the plus (**+**) icon. Changes do not affect existing connections to Datadog. + +### Custom subdomains + +If your organization uses a [custom sub-domain][23], set the custom URL as the Datadog server address in the extension settings: +1. Click **Tools > Options > Datadog**. +1. If you're editing an existing connection to Datadog, sign out of the connection before you edit the server address. Changes do not affect existing connections. +1. Under **Advanced**, set your custom URL as the Datadog server address. +1. Click the plus (**+**) icon to sign in. ## Feedback @@ -115,3 +124,4 @@ If you don't wish to send this data to Datadog, you can opt out at any time in t [20]: /code_analysis/static_analysis_rules/ [21]: https://www.datadoghq.com/legal/privacy/ [22]: https://www.datadoghq.com/legal/eula/ +[23]: /account_management/multi_organization/#custom-sub-domains diff --git a/content/en/dora_metrics/setup/deployments.md b/content/en/dora_metrics/setup/deployments.md index 630adefaa482a..0bf5069659c45 100644 --- a/content/en/dora_metrics/setup/deployments.md +++ b/content/en/dora_metrics/setup/deployments.md @@ -254,6 +254,7 @@ If the two metadata entries are defined for a service, only `extensions[datadogh - Change lead time stage breakdown metrics are only available for GitHub. - Change lead time is not available for the first deployment of a service that includes Git information. +- The Change Lead Time calculation includes a maximum of 5000 commits per deployment. - For rebased branches, *change lead time* calculations consider the new commits created during the rebase, not the original commits. - When using "Squash" to merge pull requests: - For GitHub: Metrics are emitted for the original commits. diff --git a/content/en/error_tracking/frontend/browser.md b/content/en/error_tracking/frontend/browser.md index 05141c0cd9a2f..7a78db4d25f3b 100644 --- a/content/en/error_tracking/frontend/browser.md +++ b/content/en/error_tracking/frontend/browser.md @@ -43,12 +43,7 @@ See the [Debug Symbols][15] page to view all uploaded symbols. ### Limitations -{{< site-region region="us,us3,us5,eu,gov" >}} -Source maps are limited to **500** MB each. -{{< /site-region >}} -{{< site-region region="ap1" >}} -Source maps are limited to **500** MB each. 
-{{< /site-region >}} +Source maps are limited in size to **500 MB** each. ## Collect errors diff --git a/content/en/getting_started/workflow_automation/_index.md b/content/en/getting_started/workflow_automation/_index.md index 92ab45c431c20..32c9355286323 100644 --- a/content/en/getting_started/workflow_automation/_index.md +++ b/content/en/getting_started/workflow_automation/_index.md @@ -157,9 +157,9 @@ The workflow also creates a Slack message to notify your team of the Jira issue [5]: https://app.datadoghq.com/workflow [6]: /workflows/build/#context-variables [7]: https://app.datadoghq.com/monitors/manage -[8]: /service_management/workflows/actions_catalog/ +[8]: /actions/actions_catalog/ [9]: /workflows/build -[10]: /service_management/workflows/actions_catalog/generic_actions/#http -[11]: /service_management/workflows/actions_catalog/generic_actions/#data-transformation +[10]: /service_management/workflows/actions/#http +[11]: /service_management/workflows/actions/#data-transformation diff --git a/content/en/infrastructure/containers/amazon_elastic_container_explorer.md b/content/en/infrastructure/containers/amazon_elastic_container_explorer.md index 8198dc66dc727..e2f9acd08e2eb 100644 --- a/content/en/infrastructure/containers/amazon_elastic_container_explorer.md +++ b/content/en/infrastructure/containers/amazon_elastic_container_explorer.md @@ -156,7 +156,11 @@ Some resources have specific tags. The following tags are available in addition | **Cluster** | ~15 minutes | ~15 minutes | | **Task** | ~15 seconds | ~24 hours | | **Task Definition** | ~15 seconds | ~24 hours | -| **Service** | ~24 hours | ~24 hours | +| **Service** | ~15 seconds | ~24 hours | +| **Container Instance** | ~24 hours | ~24 hours | + +* Installing the Datadog Agent in your cluster enables visibility into task lifecycle changes. Without the Datadog Agent, stopped tasks can appear as running for up to two days. +* Installing the Datadog Agent in your cluster provides additional, relevant host-level tags, such as `availability_zone`. [1]: https://app.datadoghq.com/orchestration/explorer/ecsTask [2]: /integrations/amazon_ecs diff --git a/content/en/infrastructure/resource_catalog/governance/_index.md b/content/en/infrastructure/resource_catalog/governance/_index.md new file mode 100644 index 0000000000000..8195e4c602653 --- /dev/null +++ b/content/en/infrastructure/resource_catalog/governance/_index.md @@ -0,0 +1,81 @@ +--- +title: Governance +description: Learn how to create infrastructure governance policies in Resource Catalog. +further_reading: + - link: "/infrastructure/resource_catalog/" + tag: "Documentation" + text: "Datadog Resource Catalog" + - link: "https://www.datadoghq.com/blog/datadog-resource-catalog/" + tag: "Blog" + text: "Govern your infrastructure resources with the Datadog Resource Catalog" +--- + +{{< callout url="https://www.datadoghq.com/product-preview/infra-governance-policies/" btn_hidden="false" header="Join the Preview!">}} + Governance is in Preview. +{{< /callout >}} + +## Overview + +{{< img src="/infrastructure/resource_catalog/governance/custom-policy-list-1.png" alt="The Resource Catalog page showing the Governance tab and list of custom policies" width="100%">}} + + +In the Resource Catalog's Governance view, you can define policies on your infrastructure resources based on governance best practices in your organization, such as improving ownership tag coverage on resources or ensuring versioning on critical resources is up-to-date. 
Instead of writing custom scripts or lambdas that scan every resource, Datadog gives you visibility into problematic resources so that you can focus on remediation. + +Specifically, you can: + +- Define a [custom policy](#create-a-custom-policy), which involves choosing a resource type, the attribute on the resource type, and target values the attribute should have. +- Define a [tagging policy](#create-a-tagging-policy), which involves a resource type and the desired tag key and value the resource type should have. +- Access a dedicated view for each policy where you can see its list of non-compliant resources and compliance score. +- Filter, group, and export the list of non-compliant resources for a policy so you can prioritize and assign the work. + +## Create a custom policy + +{{< img src="/infrastructure/resource_catalog/governance/custom-policy-example-1.png" alt="A custom policy reflecting a compliance score of thirty percent." width="100%">}} + +Custom policies require specific values in your cloud resource attributes within Datadog based on your organization's infrastructure best practices. + +To create a custom policy: + +1. Navigate to **Infrastructure > Resource Catalog** and click the [**Governance**][1] tab. +2. Click the **New Custom Policy** button. +3. Select a resource type from the dropdown menu. +4. Optionally, search for additional dataset filters. +5. Select a target resource attribute and desired value. +6. Optionally, add instructions for remediation. +7. A name is automatically generated based on the data entered, but you can modify it. +8. Click **Create Custom Policy**. + +Click the new policy to review all non-compliant resources and filter them by region, environment, account, service, or team. You can also group them by attributes or tags. + +## Create a tagging policy + +Tagging policies require specific tag keys and tag value formats on your infrastructure resources across Datadog. + +To create a tagging policy: + +1. Navigate to **Infrastructure > Resource Catalog** and click the [**Governance**][1] tab. +2. Click the **New Tagging Policy** button. +3. Choose the resource types the policy applies to. +4. Define the required tag key and its allowed values. +5. A name is automatically generated based on the data entered, but you can modify it. +6. Click **Create Tagging Policy**. + +Click the new policy to review all non-compliant resources and filter them by cloud, region, environment, account, service, team, or tag. You can also group them by attributes or tags. + +[1]: https://app.datadoghq.com/infrastructure/catalog/governance + +## Updating policies + +To update a policy, click the policy, then click the **Edit** button and modify as needed. + +## Deleting policies + +To delete a custom or tagging policy, click the policy, then click the **Delete** button. + +## Exporting policies + +To export the list of non-compliant resources for a policy, click the policy, then click the **Export as CSV** button. 
+ +## Further Reading + +{{< partial name="whats-next/whats-next.html" >}} \ No newline at end of file diff --git a/content/en/integrations/guide/azure-programmatic-management.md b/content/en/integrations/guide/azure-programmatic-management.md index 0e198be1b65f4..b4477935c9e30 100644 --- a/content/en/integrations/guide/azure-programmatic-management.md +++ b/content/en/integrations/guide/azure-programmatic-management.md @@ -141,7 +141,7 @@ The protected settings include: SETTINGS protected_settings = <" + "api_key": "" } PROTECTED_SETTINGS ``` diff --git a/content/en/llm_observability/_index.md b/content/en/llm_observability/_index.md index bb1e160f69bb4..b60018b1b0a32 100644 --- a/content/en/llm_observability/_index.md +++ b/content/en/llm_observability/_index.md @@ -68,8 +68,6 @@ For more information, see the [Auto Instrumentation documentation][8]. ## Ready to start? -
By using LLM Observability, you acknowledge that Datadog is authorized to share your company's data with OpenAI LLC for the purpose of providing and improving LLM Observability. OpenAI will not use your data for training or tuning purposes. If you have any questions or want to opt out of features that depend on OpenAI, reach out to your account representative.
- See the [Setup documentation][5] for instructions on instrumenting your LLM application or follow the [Trace an LLM Application guide][6] to generate a trace using the [LLM Observability SDK for Python][3]. ## Further Reading diff --git a/content/en/llm_observability/configuration/_index.md b/content/en/llm_observability/configuration/_index.md index 9e8f986b04893..c33905b023598 100644 --- a/content/en/llm_observability/configuration/_index.md +++ b/content/en/llm_observability/configuration/_index.md @@ -47,6 +47,17 @@ After you click **Save**, LLM Observability invokes a `GPT-4o mini` model using For more information about evaluations, see [Terms and Concepts][1]. +### Estimated Token Usage + +LLM Observability provides metrics to help you monitor and manage the token usage associated with evaluations that power LLM Observability. The following metrics allow you to track the LLM resources consumed to power evaluations: + + +- `ml_obs.estimated_usage.llm.input.tokens` +- `ml_obs.estimated_usage.llm.output.tokens` +- `ml_obs.estimated_usage.llm.total.tokens` + +Each of these metrics has `ml_app`, `model_server`, `model_provider`, `model_name`, and `evaluation_name` tags, allowing you to pinpoint specific applications, models, and evaluations contributing to your usage. + ## Provide topics for topic relevancy Providing topics allows you to use the [topic relevancy][4] evaluation. diff --git a/content/en/llm_observability/setup/sdk/python.md b/content/en/llm_observability/setup/sdk/python.md index fe85b2fd455a0..6f24aff74f0e9 100644 --- a/content/en/llm_observability/setup/sdk/python.md +++ b/content/en/llm_observability/setup/sdk/python.md @@ -412,7 +412,7 @@ The `LLMObs.annotate()` method accepts the following arguments: `tags` : optional - _dictionary_ -
A dictionary of JSON serializable key-value pairs that users can add as tags regarding the span's context (`session`, `environment`, `system`, `versioning`, etc.). For more information about tags, see [Getting Started with Tags][9]. +
A dictionary of JSON serializable key-value pairs that users can add as tags on the span. Example keys: `session`, `env`, `system`, and `version`. For more information about tags, see [Getting Started with Tags][9]. ### Example @@ -468,6 +468,55 @@ def similarity_search(): {{< /code-block >}} +### Annotating auto-instrumented spans + +The SDK's `LLMObs.annotation_context()` method returns a context manager that can be used to modify all auto-instrumented spans started while the annotation context is active. + +#### Arguments + +The `LLMObs.annotation_context()` method accepts the following arguments: + +`name` +: optional - _str_ +
Name that overrides the span name for any auto-instrumented spans that are started within the annotation context. + +`prompt` +: optional - _dictionary_ +
A dictionary that represents the prompt used for an LLM call in the following format:
`{"template": "...", "id": "...", "version": "...", "variables": {"variable_1": "...", ...}}`.
You can also import the `Prompt` object from `ddtrace.llmobs.utils` and pass it in as the `prompt` argument. **Note**: This argument only applies to LLM spans. + +`tags` +: optional - _dictionary_ +
A dictionary of JSON serializable key-value pairs that users can add as tags on the span. Example keys: `session`, `env`, `system`, and `version`. For more information about tags, see [Getting Started with Tags][9]. +#### Example + +{{< code-block lang="python" >}} +from ddtrace.llmobs import LLMObs +from ddtrace.llmobs.decorators import workflow +from ddtrace.llmobs.utils import Prompt + +@workflow +def rag_workflow(user_question): + # retrieve_documents and openai_client are application-specific and not defined here. + context_str = " ".join(retrieve_documents(user_question)) + + with LLMObs.annotation_context( + prompt = Prompt( + variables = { + "question": user_question, + "context": context_str, + }, + template = "Please answer the..." + ), + tags = { + "retrieval_strategy": "semantic_similarity" + }, + name = "augmented_generation" + ): + completion = openai_client.chat.completions.create(...) + return completion.choices[0].message.content + +{{< /code-block >}} + + ## Evaluations The LLM Observability SDK provides the methods `LLMObs.export_span()` and `LLMObs.submit_evaluation()` to help your traced LLM application submit evaluations to LLM Observability. diff --git a/content/en/logs/explorer/live_tail.md index 4e72b9adafa97..314f7bfb1311d 100644 --- a/content/en/logs/explorer/live_tail.md +++ b/content/en/logs/explorer/live_tail.md @@ -28,8 +28,6 @@ In the [Log Explorer][3], choose the Live Tail option in the timerange to query {{< img src="logs/explorer/live_tail/livetail.mp4" alt="Log Live Tail" video=true style="width:100%;" >}} -Contrary to queries on indexed logs happening in the [Log Explorer][3], queries in the Live Tail do *not* require that you [declare a facet][5] beforehand. - **Note**: For the sake of readability, the Live Tail output is sampled when too many logs matching the query are flowing in. The sampling applied is uniformly random, so that your Live Tail logs are statistically representative of your actual log throughput. Scope your query down with additional search filters if you need visibility on every single log flowing in. 
## Further Reading diff --git a/content/en/logs/guide/azure-logging-guide.md b/content/en/logs/guide/azure-logging-guide.md index f1dcd97442c0f..c1c8f1fd4d9c2 100644 --- a/content/en/logs/guide/azure-logging-guide.md +++ b/content/en/logs/guide/azure-logging-guide.md @@ -7,6 +7,9 @@ further_reading: - link: "/logs/guide/reduce_data_transfer_fees" tag: "Guide" text: "How to send logs to Datadog while reducing data transfer fees" +- link: "https://github.com/Azure-Samples/terraform-azure-datadog-log-forwarder" + tag: "External Site" + text: "Terraform Azure Datadog Log Forwarder" --- ## Overview @@ -372,4 +375,4 @@ Once you have an App Registration configured, you can [create a log archive][3] [3]: /logs/log_configuration/archives/ [4]: /logs/guide/azure-native-logging-guide/ [5]: https://learn.microsoft.com/en-us/azure/partner-solutions/datadog/overview -[6]: /data_security/pci_compliance/?tab=logmanagement \ No newline at end of file +[6]: /data_security/pci_compliance/?tab=logmanagement diff --git a/content/en/logs/guide/send-aws-services-logs-with-the-datadog-lambda-function.md b/content/en/logs/guide/send-aws-services-logs-with-the-datadog-lambda-function.md index 4f24af88af06d..1894274d44383 100644 --- a/content/en/logs/guide/send-aws-services-logs-with-the-datadog-lambda-function.md +++ b/content/en/logs/guide/send-aws-services-logs-with-the-datadog-lambda-function.md @@ -140,7 +140,7 @@ Datadog can automatically configure triggers on the Datadog Forwarder Lambda fun | `logs:DeleteSubscriptionFilter` | Remove a Lambda trigger based on CloudWatch Log events | | `logs:DescribeSubscriptionFilters` | List the subscription filters for the specified log group. | -3. In the [AWS Integration page][44], select the AWS Account to collect logs from and click on the **Log Collection** tab. +3. In the [AWS Integration page][44], select the AWS Account to collect logs from and click on the **Log Collection** tab. {{< img src="logs/aws/aws_log_setup_step1.png" alt="The Log Collection tab of the AWS integration page for a specific AWS account with instructions to send AWS Services logs and a textbox to autosubscribe the Forwarder Lambda function by entering the ARN of the Forwarder Lambda function" popup="true" style="width:90%;" >}} 4. Enter the ARN of the Lambda created in the previous section and click **Add**. 5. Select the services from which you'd like to collect logs and click **Save**. To stop collecting logs from a particular service, deselect the log source. @@ -158,7 +158,7 @@ If you are collecting logs from a CloudWatch log group, configure the trigger to {{< tabs >}} {{% tab "AWS console" %}} -1. In the AWS console, go to **Lambda**. +1. In the AWS console, go to **Lambda**. 2. Click **Functions** and select the Datadog Forwarder. 3. Click **Add trigger** and select **CloudWatch Logs**. 4. Select the log group from the dropdown menu. @@ -173,13 +173,25 @@ If you are collecting logs from a CloudWatch log group, configure the trigger to For Terraform users, you can provision and manage your triggers using the [aws_cloudwatch_log_subscription_filter][1] resource. See sample code below. 
```conf +data "aws_cloudwatch_log_group" "some_log_group" { + name = "/some/log/group" +} + +resource "aws_lambda_permission" "lambda_permission" { + action = "lambda:InvokeFunction" + function_name = "datadog-forwarder" # this is the default but may be different in your case + principal = "logs.amazonaws.com" # or logs.amazonaws.com.cn for China* + source_arn = data.aws_cloudwatch_log_group.some_log_group.arn +} + resource "aws_cloudwatch_log_subscription_filter" "datadog_log_subscription_filter" { name = "datadog_log_subscription_filter" - log_group_name = # for example, /aws/lambda/my_lambda_name + log_group_name = # for example, /some/log/group destination_arn = # for example, arn:aws:lambda:us-east-1:123:function:datadog-forwarder filter_pattern = "" } ``` +_*All use of Datadog Services in (or in connection with environments within) mainland China is subject to the disclaimer published in the [Restricted Service Locations](https://www.datadoghq.com/legal/restricted-service-locations/) section on our website._ [1]: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_log_subscription_filter {{% /tab %}} diff --git a/content/en/logs/log_collection/javascript.md b/content/en/logs/log_collection/javascript.md index 0d212b3871ab3..5e3735f2a74a3 100644 --- a/content/en/logs/log_collection/javascript.md +++ b/content/en/logs/log_collection/javascript.md @@ -23,7 +23,7 @@ With the browser logs SDK, you can send logs directly to Datadog from web browse **Datadog browser logs SDK**: Configure the SDK through [NPM](#npm) or use the [CDN async](#cdn-async) or [CDN sync](#cdn-sync) code snippets in the head tag. -**Supported browsers**: The browser logs SDK supports all modern desktop and mobile browsers including IE11. See the [browser support][4] table. +**Supported browsers**: The browser logs SDK supports all modern desktop and mobile browsers. See the [Browser Support][4] table. ### Choose the right installation method @@ -369,13 +369,15 @@ The following parameters are available to configure the Datadog browser logs SDK Options that must have a matching configuration when using the `RUM` SDK: -| Parameter | Type | Required | Default | Description | -|----------------------------------------| ------- | -------- | ------- |--------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `trackSessionAcrossSubdomains` | Boolean | No | `false` | Preserve the session across subdomains for the same site. | -| `useSecureSessionCookie` | Boolean | No | `false` | Use a secure session cookie. This disables logs sent on insecure (non-HTTPS) connections. | -| `usePartitionedCrossSiteSessionCookie` | Boolean | No | `false` | Use a partitioned secure cross-site session cookie. This allows the logs SDK to run when the site is loaded from another one (iframe). Implies `useSecureSessionCookie`. | -| `useCrossSiteSessionCookie` | Boolean | No | `false` | **Deprecated**, see `usePartitionedCrossSiteSessionCookie`. | -| `usePciIntake` | Boolean | No | `false` | To forward logs to the [PCI-compliant intake][16], set to `true`. The PCI-compliant intake is only available for Datadog organizations in the US1 site. If `usePciIntake` is set to `true` and the site is not US1 (datadoghq.com), logs are sent to the default intake. 
| + +| Parameter | Type | Required | Default | Description | +| -------------------------------------- | ------------------------------- | -------- | ---------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `sessionPersistence` | `"cookie"` or `"local-storage"` | No | `"cookie"` | Which storage strategy to use for persisting sessions. Can be either `cookie` or `local-storage`. | +| `trackAnonymousUser` | Boolean | No | `true` | Enables collection of anonymous user id across sessions. | +| `trackSessionAcrossSubdomains` | Boolean | No | `false` | Preserve the session across subdomains for the same site. | +| `useSecureSessionCookie` | Boolean | No | `false` | Use a secure session cookie. This disables logs sent on insecure (non-HTTPS) connections. | +| `usePartitionedCrossSiteSessionCookie` | Boolean | No | `false` | Use a partitioned secure cross-site session cookie. This allows the logs SDK to run when the site is loaded from another one (iframe). Implies `useSecureSessionCookie`. | +| `usePciIntake` | Boolean | No | `false` | To forward logs to the [PCI-compliant intake][16], set to `true`. The PCI-compliant intake is only available for Datadog organizations in the US1 site. If `usePciIntake` is set to `true` and the site is not US1 (datadoghq.com), logs are sent to the default intake. | ## Usage diff --git a/content/en/logs/log_configuration/archives.md b/content/en/logs/log_configuration/archives.md index 23e969399ed0b..682a08ddbe564 100644 --- a/content/en/logs/log_configuration/archives.md +++ b/content/en/logs/log_configuration/archives.md @@ -271,6 +271,18 @@ Archiving and [Rehydration][1] only supports the following access tiers: If you wish to rehydrate from archives in another access tier, you must first move them to one of the supported tiers above. +[1]: /logs/archives/rehydrating/ +{{% /tab %}} +{{% tab "Google Cloud Storage" %}} + +[Rehydration][1] only supports the following storage classes: + +- Standard +- Nearline +- Coldline + +If you wish to rehydrate from archives in another storage class, you must first move them to one of the supported classes above. + [1]: /logs/archives/rehydrating/ {{% /tab %}} {{< /tabs >}} diff --git a/content/en/logs/log_configuration/flex_logs.md b/content/en/logs/log_configuration/flex_logs.md index 826fa8f4aee32..c9b3dac969c78 100644 --- a/content/en/logs/log_configuration/flex_logs.md +++ b/content/en/logs/log_configuration/flex_logs.md @@ -114,7 +114,7 @@ To disable Flex Logs: 1. Remove Flex Storage from each index where Flex Logs is enabled. 1. Navigate back to the [Flex Logs Control][5] page. -1. Click **Disable Flex Logs**. +1. Click the gear icon and select **Disable Flex Logs**. ## Upgrade and downgrade Flex Logs compute @@ -206,7 +206,7 @@ The following list is an example of log sources that are good candidates for sen ### Flex Logs for multiple-organization accounts -For each organization in which you want Flex Logs, you must enable a compute size per organization. Compute sizes cannot be shared across organizations. +For each organization in which you want Flex Logs, you must enable a compute size per organization. Only one compute can be used per organization, and compute sizes cannot be shared across organizations. Starter and scalable compute cannot be used simultaneously in an organization. 
Datadog generally recommends Flex Logs scalable compute sizes (XS, S, M, and L) for organizations with large log volumes. In a multi-organization setup, there are often many organizations with lower log volumes, so for these organizations, Datadog recommends the Starter compute size for Flex Logs. diff --git a/content/en/logs/workspaces/export.md b/content/en/logs/workspaces/export.md new file mode 100644 index 0000000000000..a7babcadb5299 --- /dev/null +++ b/content/en/logs/workspaces/export.md @@ -0,0 +1,38 @@ +--- +title: Export Log Workspaces Datasets +private: true +further_reading: +- link: "/logs/workspaces/" + tag: "Documentation" + text: "Learn more about Log Workspaces" +- link: "/dashboards/widgets/list/" + tag: "Documentation" + text: "List Widget" +--- + +## Overview + +Export your Workspaces cell as a dataset to dashboards and notebooks. The export feature allows you to create datasets and display them in dashboards and notebooks for analysis or reporting. + +## Export a Workspaces cell as a dataset + +From your workspace: + +1. Find the cell you want to export as a dataset. +2. Click **Save to Dashboard** or select **Save to Notebook** from the dropdown menu. +3. In the modal, choose an existing dashboard or notebook, or add the cell as a dataset to a new dashboard or notebook. +4. Click **Save**. +5. (Optional) Rename the source and click **Update**. The source name defaults to ` - `. +6. A purple banner indicates that the cell has been exported. + +{{< img src="logs/workspace/export/example_exported_dataset.png" alt="Example Workspaces cell that has already been exported, showing a purple banner" style="width:100%;" >}} + +Saving a cell to a dashboard creates a real-time sync between the cell and every widget it's saved on. Any changes made to an exported dataset are also reflected in the associated dashboards or notebooks. + +From the dashboard or notebook, you can adjust the columns and customize widget options. However, you can only change dataset configurations from Log Workspaces. To edit the source, click **Edit in Log Workspaces** from the widget graph editor. + +{{< img src="/logs/workspace/export/link_to_workspace_from_dashboard.png" alt="Options in the graph editor to adjust columns and link out to the source Workspace" style="width:100%;" >}} + +## Further reading + +{{< partial name="whats-next/whats-next.html" >}} \ No newline at end of file diff --git a/content/en/logs/workspaces/sql_reference.md b/content/en/logs/workspaces/sql_reference.md index 275057a6f8a64..41f6daa270dad 100644 --- a/content/en/logs/workspaces/sql_reference.md +++ b/content/en/logs/workspaces/sql_reference.md @@ -11,7 +11,7 @@ further_reading: SQL in [Analysis cells][1] allows you to analyze and manipulate data within Log Workspaces. This documentation covers the SQL support available in Log Workspaces and includes: - [Syntax compatible with PostgreSQL](#syntax) - [SQL functions](#functions) - +- [Window functions](#window-functions) {{< img src="/logs/workspace/sql_reference/sql_syntax_analysis_cell.png" alt="Example workspace cell with SQL syntax" style="width:100%;" >}} @@ -39,12 +39,19 @@ WHERE department = 'Sales' AND name LIKE 'J%' {{< /code-block >}} | ELSE 'Standard Order' END AS order_type FROM orders {{< /code-block >}} | +| `WINDOW` | Performs a calculation across a set of table rows that are related to the current row. 
| {{< code-block lang="sql" >}}SELECT + timestamp, + service_name, + cpu_usage_percent, + AVG(cpu_usage_percent) OVER (PARTITION BY service_name ORDER BY timestamp ROWS BETWEEN 2 PRECEDING AND CURRENT ROW) AS moving_avg_cpu +FROM + cpu_usage_data {{< /code-block >}} | | Arithmetic Operations | Performs basic calculations using operators like `+`, `-`, `*`, `/`. | {{< code-block lang="sql" >}}SELECT price, tax, (price * tax) AS total_cost FROM products {{< /code-block >}} | ## Functions -The following SQL functions are supported: +The following SQL functions are supported. For window functions, see the separate [Window functions](#window-functions) section in this documentation. | Function | Return Type | Description | |------------------------|---------------------------------|-------------------------------------------------------------------------| @@ -60,7 +67,9 @@ The following SQL functions are supported: | `upper(string s)` | string | Returns the string as uppercase. | | `abs(numeric n)` | numeric | Returns the absolute value. | | `coalesce(args a)` | typeof first non-null a OR null | Returns the first non-null value or null if all are null. | - +| `cast(value AS type)` | type | Converts the given value to the specified data type. | +| `length(string s)` | integer | Returns the number of characters in the string. | +| `INTERVAL value unit` | interval | Represents a time duration specified in a given unit. | {{% collapse-content title="Examples" level="h3" %}} @@ -136,15 +145,56 @@ FROM accounts ### `COALESCE` {{< code-block lang="sql" >}} - SELECT COALESCE(phone_number, email) AS contact_info +SELECT COALESCE(phone_number, email) AS contact_info FROM users {{< /code-block >}} +### `CAST` +{{< code-block lang="sql" >}} +SELECT + CAST(order_id AS VARCHAR) AS order_id_string, + 'Order-' || CAST(order_id AS VARCHAR) AS order_label +FROM + orders +{{< /code-block >}} + +### `LENGTH` +{{< code-block lang="sql" >}} +SELECT + customer_name, + LENGTH(customer_name) AS name_length +FROM + customers +{{< /code-block >}} + +### `INTERVAL` +{{< code-block lang="sql" >}} +SELECT + TIMESTAMP '2023-10-01 10:00:00' + INTERVAL '30 days' AS future_date +{{< /code-block >}} + {{% /collapse-content %}} +## Window functions + +This table provides an overview of the supported window functions. For comprehensive details and examples, see the [PostgreSQL documentation][2]. + +| Function | Return Type | Description | |-------------------------|-------------------|------------------------------------------------------------------------| +| `OVER` | N/A | Defines a window for a set of rows for other window functions to operate on. | +| `PARTITION BY` | N/A | Divides the result set into partitions, specifically for applying window functions. | +| `RANK()` | integer | Assigns a rank to each row within a partition, with gaps for ties. | +| `ROW_NUMBER()` | integer | Assigns a unique sequential number to each row within a partition. | +| `LEAD(column n)` | typeof column | Returns the value from the next row in the partition. | +| `LAG(column n)` | typeof column | Returns the value from the previous row in the partition. | +| `FIRST_VALUE(column n)` | typeof column | Returns the first value in an ordered set of values. | +| `LAST_VALUE(column n)` | typeof column | Returns the last value in an ordered set of values. | +| `NTH_VALUE(column n, offset)`| typeof column | Returns the value at the specified offset in an ordered set of values. 
| + ## Further reading {{< partial name="whats-next/whats-next.html" >}} -[1]: /logs/workspaces/#analysis-cell \ No newline at end of file +[1]: /logs/workspaces/#analysis-cell +[2]: https://www.postgresql.org/docs/current/functions-window.html \ No newline at end of file diff --git a/content/en/mobile/_index.md b/content/en/mobile/_index.md index 0b197f5b88506..91d64f0aed1d1 100644 --- a/content/en/mobile/_index.md +++ b/content/en/mobile/_index.md @@ -19,7 +19,7 @@ further_reading: text: "Getting started with the Datadog mobile app" --- -The Datadog Mobile app enables you to view alerts from Datadog on your mobile device. When receiving an alert via Slack, e-mail, Pagerduty or other pager apps, you'll be able to investigate issues by opening monitor graphs and dashboards on your mobile device. +The Datadog Mobile app enables you to view alerts from Datadog on your mobile device. When receiving an alert through On-Call, Slack, or email, you can investigate issues by opening monitor graphs and dashboards on your mobile device. ## Installing @@ -109,7 +109,7 @@ For more information on configuring Incident Notification Rules, see the [Incide View your [open incidents][12] from your mobile home screen with Datadog widgets. -To dive deeper into issues, tap any open incident displayed in the widget to have it open with more details in the Datadog mobile app. +To dive deeper into issues, tap any open incident displayed in the widget to have it open with more details in the Datadog mobile app. Also, you can customize your Open Incidents widgets by filtering on: @@ -174,7 +174,7 @@ Also, you can customize your Open Incidents widgets by filtering on: #### Display Open Incidents from multiple organizations -You can display open incidents from multiple organizations on your mobile home screen. +You can display open incidents from multiple organizations on your mobile home screen. {{< tabs >}} {{% tab "iOS" %}} @@ -189,7 +189,7 @@ You can display open incidents from multiple organizations on your mobile home s 2. From the configuration screen, tap **Organization**. 3. Select a new organization (you may need to sign in). 4. Size the widget to fit your preference. -5. Tap **Save** or **Apply**. +5. Tap **Save** or **Apply**. {{% /tab %}} @@ -263,8 +263,8 @@ You can also specify a dashboard that opens by default when you tap on an SLOs w - Long press on the widget to configure. - Tap "Edit Widget." - Tap "Choose" next to the SLO label to choose an SLO to track. -- Depending on the SLO chosen, a "Timeframe" label may appear. Tap "Choose" next to the "Timeframe" label to choose the SLO timeframe. -- Tap "Choose" next to the "Dashboard to open" label to choose a dashboard that opens when the SLOs widget is tapped. +- Depending on the SLO chosen, a "Timeframe" label may appear. Tap "Choose" next to the "Timeframe" label to choose the SLO timeframe. +- Tap "Choose" next to the "Dashboard to open" label to choose a dashboard that opens when the SLOs widget is tapped. - Tap out of the widget to validate your selection and exit the configuration screen. @@ -284,7 +284,7 @@ You can also specify a dashboard that opens by default when you tap on an SLOs w #### Display SLOs from multiple organizations -You can display SLOs from multiple organizations on your mobile home screen. +You can display SLOs from multiple organizations on your mobile home screen. {{< tabs >}} {{% tab "iOS" %}} @@ -299,7 +299,7 @@ All organizations you've logged into are displayed in the configuration screen. 
- From the configuration screen, tap "Organization". - Select a new organization (You might need to sign in). - Size the widget to fit your preference. -- Tap "Save" or "Apply". +- Tap "Save" or "Apply". {{% /tab %}} @@ -382,7 +382,7 @@ View your [monitors][16] from your home screen with Datadog widgets. Tap any cel #### Display Monitors from multiple organizations -You can display Monitors from multiple organizations within the same widget. +You can display Monitors from multiple organizations within the same widget. {{< tabs >}} {{% tab "iOS" %}} @@ -397,7 +397,7 @@ All organizations you've logged-in are displayed in the configuration screen. If - From the configuration screen, tap "Organization". - Select a new organization (you might need to sign in). - Edit the widget to fit your preference. -- Tap "Save" or "Apply". +- Tap "Save" or "Apply". {{% /tab %}} diff --git a/content/en/monitors/types/outlier.md b/content/en/monitors/types/outlier.md index 80e9f56bd18bd..bd0af04f7248b 100644 --- a/content/en/monitors/types/outlier.md +++ b/content/en/monitors/types/outlier.md @@ -37,7 +37,7 @@ The outlier monitor requires a metric with a group (hosts, availability zones, p ### Set alert conditions * Trigger a separate alert for each outlier `` -* during the last `5 minutes`, `15 minutes`, `1 hour`, etc. or `custom` to set a value between 1 minute and 24 hours. +* during the last `5 minutes`, `15 minutes`, `1 hour`, and so on, or use `custom` to set a value between 1 minute and 1 year. * Using algorithm `MAD`, `DBSCAN`, `scaledMAD`, or `scaledDBSCAN` * tolerance: `0.33`, `1.0`, `3.0`, etc. * %: `10`, `20`, `30`, etc. (only for `MAD` algorithms) diff --git a/content/en/network_monitoring/network_path/setup.md b/content/en/network_monitoring/network_path/setup.md index ea3748670628a..d2e61cdebbffe 100644 --- a/content/en/network_monitoring/network_path/setup.md +++ b/content/en/network_monitoring/network_path/setup.md @@ -23,8 +23,7 @@ Setting up Network Path involves configuring your Linux environment to monitor a ## Prerequisites -- Agent version `7.59` or higher is required. -- [CNM][1] must be enabled. +[CNM][1] must be enabled. **Note**: If your network configuration restricts outbound traffic, follow the setup instructions on the [Agent proxy configuration][2] documentation. @@ -32,6 +31,11 @@ Setting up Network Path involves configuring your Linux environment to monitor a ### Monitor individual paths +{{< tabs >}} +{{% tab "Linux" %}} + +Agent `v7.59+` is required. + Manually configure individual paths by specifying the exact endpoint you want to test. This allows you to target specific network routes for monitoring. 1. Enable the `system-probe` traceroute module in `/etc/datadog-agent/system-probe.yaml` by adding the following: @@ -126,8 +130,35 @@ Manually configure individual paths by specifying the exact endpoint you want to 3. Restart the Agent after making these configuration changes to start seeing network paths. +[4]: https://github.com/DataDog/datadog-agent/blob/main/cmd/agent/dist/conf.d/network_path.d/conf.yaml.example + +{{% /tab %}} +{{% tab "Windows" %}} + +Agent `v7.61+` is required. + +**Note**: Windows only supports TCP traceroutes. + +In Windows environments, the Agent uses UDP by default to monitor individual paths. If the protocol is not specified in the configuration, the Agent attempts a UDP traceroute, and any errors are logged. To work around this, ensure the protocol is set to TCP. 
For example: + +```yaml +init_config: + min_collection_interval: 60 # in seconds, default 60 seconds +instances: + - hostname: api.datadoghq.eu # endpoint hostname or IP + protocol: TCP + port: 443 # optional port number, default is 80 +``` +{{% /tab %}} +{{< /tabs >}} + ### Network traffic paths (experimental) +{{< tabs >}} +{{% tab "Linux" %}} + +Agent `v7.59+` is required. + **Note**: Network traffic paths is experimental and is not yet stable. Do not deploy network traffic paths widely in a production environment. Configure network traffic paths to allow the Agent to automatically discover and monitor network paths based on actual network traffic, without requiring you to specify endpoints manually. @@ -170,7 +201,17 @@ Configure network traffic paths to allow the Agent to automatically discover and 3. Restart the Agent after making these configuration changes to start seeing network paths. -**Note**: Network path is only supported for Linux environments. +[3]: https://github.com/DataDog/datadog-agent/blob/main/pkg/config/config_template.yaml#L1697 + +{{% /tab %}} +{{% tab "Windows" %}} + +Agent `v7.61+` is required. + +For network traffic paths on Windows environments, only detected TCP connections are shown. + +{{% /tab %}} +{{< /tabs >}} ## Further Reading @@ -178,5 +219,5 @@ Configure network traffic paths to allow the Agent to automatically discover and [1]: /network_monitoring/cloud_network_monitoring/setup/ [2]: https://docs.datadoghq.com/agent/configuration/proxy/?tab=linux -[3]: https://github.com/DataDog/datadog-agent/blob/main/pkg/config/config_template.yaml#L1645 -[4]: https://github.com/DataDog/datadog-agent/blob/main/cmd/agent/dist/conf.d/network_path.d/conf.yaml.example + + diff --git a/content/en/opentelemetry/interoperability/instrumentation_libraries.md b/content/en/opentelemetry/interoperability/instrumentation_libraries.md index 7d3b6626c4684..d210eebe67ea5 100644 --- a/content/en/opentelemetry/interoperability/instrumentation_libraries.md +++ b/content/en/opentelemetry/interoperability/instrumentation_libraries.md @@ -202,8 +202,10 @@ import ( "log" "net/http" - ddotel "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/opentelemetry" - ddtracer "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" + ddotel "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/opentelemetry" // 1.x + ddtracer "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" // 1.x + // "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer // 2.x + // "github.com/DataDog/dd-trace-go/v2/ddtrace/opentelemetry // 2.x "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" "go.opentelemetry.io/otel" diff --git a/content/en/opentelemetry/interoperability/otlp_ingest_in_the_agent.md b/content/en/opentelemetry/interoperability/otlp_ingest_in_the_agent.md index 43a1281012b34..488e2a90e0506 100644 --- a/content/en/opentelemetry/interoperability/otlp_ingest_in_the_agent.md +++ b/content/en/opentelemetry/interoperability/otlp_ingest_in_the_agent.md @@ -204,6 +204,35 @@ This enables each protocol in the default port (`4317` for OTLP/gRPC and `4318` This enables each protocol in the default port (`4317` for OTLP/gRPC and `4318` for OTLP/HTTP). +[1]: /agent/kubernetes/?tab=helm +{{% /tab %}} +{{% tab "Kubernetes (Operator)" %}} + +1. Follow the [Kubernetes Agent setup][1]. + +2. 
Enable the preferred protocol in your Operator's manifest: + + For gRPC: + ```yaml + features: + otlp: + receiver: + protocols: + grpc: + enabled: true + ``` + For HTTP: + ```yaml + features: + otlp: + receiver: + protocols: + http: + enabled: true + ``` + +This enables each protocol in the default port (`4317` for OTLP/gRPC and `4318` for OTLP/HTTP). + [1]: /agent/kubernetes/?tab=helm {{% /tab %}} {{% tab "AWS Lambda" %}} @@ -278,4 +307,4 @@ env: [4]: https://github.com/DataDog/datadog-agent/blob/main/CHANGELOG.rst [5]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/receiver/otlpreceiver/config.md [6]: https://github.com/DataDog/datadog-agent/blob/main/pkg/config/config_template.yaml -[10]: /opentelemetry/runtime_metrics/ \ No newline at end of file +[10]: /opentelemetry/runtime_metrics/ diff --git a/content/en/opentelemetry/otlp_logs.md b/content/en/opentelemetry/otlp_logs.md index 0c2da213ed597..177792e28bb84 100644 --- a/content/en/opentelemetry/otlp_logs.md +++ b/content/en/opentelemetry/otlp_logs.md @@ -12,6 +12,10 @@ further_reading: text: "OpenTelemetry Protocol Exporter" --- +{{< callout header="false" btn_hidden="true">}} + The Datadog OTLP logs intake endpoint is in Preview. +{{< /callout >}} + {{< site-region region="ap1,gov" >}}
Datadog OTLP logs intake endpoint is not supported for your selected Datadog site ({{< region-param key="dd_site_name" >}}).
{{< /site-region >}} diff --git a/content/en/product_analytics/session_replay/browser/_index.md b/content/en/product_analytics/session_replay/browser/_index.md index 754c2b5019533..ef29cb24b579b 100644 --- a/content/en/product_analytics/session_replay/browser/_index.md +++ b/content/en/product_analytics/session_replay/browser/_index.md @@ -32,7 +32,7 @@ The Session Replay recorder is part of the RUM Browser SDK. The recorder takes a Datadog then rebuilds the web page and re-applies the recorded events at the appropriate time in the replay view. Session Replay follows the same 30 day retention policy as normal RUM sessions. -The Session Replay recorder supports all browsers supported by the RUM Browser SDK with the exception of IE11. For more information, see the [browser support table][3]. +The Session Replay recorder supports all browsers supported by the RUM Browser SDK. For more information, see the [Browser Support table][3]. To reduce Session Replay's network impact and ensure the Session Replay recorder has minimal overhead on your application's performance, Datadog compresses the data prior to sending it. Datadog also reduces the load on a browser's UI thread by delegating most of the CPU-intensive work (such as compression) to a dedicated web worker. The expected network bandwidth impact is less than 100kB/min. diff --git a/content/en/profiler/enabling/go.md b/content/en/profiler/enabling/go.md index 28bd5ed67434a..f5042f1210f5f 100644 --- a/content/en/profiler/enabling/go.md +++ b/content/en/profiler/enabling/go.md @@ -38,15 +38,16 @@ To begin profiling applications: 2. Get `dd-trace-go` using the command: ```shell - go get gopkg.in/DataDog/dd-trace-go.v1/profiler + go get gopkg.in/DataDog/dd-trace-go.v1/profiler # v1 + # go get github.com/DataDog/dd-trace-go/v2/profiler # v2 ``` - **Note**: Profiler is available in the `dd-trace-go` library for versions 1.23.0+. -3. Import the [profiler][6] at the start of your application: +3. Import the [profiler][6] ([or profiler v2][21]) at the start of your application: - ```Go - import "gopkg.in/DataDog/dd-trace-go.v1/profiler" + ```go + import "gopkg.in/DataDog/dd-trace-go.v1/profiler" // 1.x + // "github.com/DataDog/dd-trace-go/v2/profiler" // 2.x ``` 4. Add the following snippet to start the profiler: @@ -80,7 +81,7 @@ To begin profiling applications: 6. After a minute or two, visualize your profiles in the [Datadog APM > Profiler page][10]. -**Note**: By default, only the CPU and Heap profiles are enabled. Use [profiler.WithProfileTypes][11] to enable additional [profile types][12]. +**Note**: By default, only the CPU and Heap profiles are enabled. Use [profiler.WithProfileTypes][11] to enable additional [profile types][12]. For relevant v2 documentation, view [profiler.WithProfileTypes][22] and [profile types][23]. If you automatically instrument your Go application with [Orchestrion][20], it adds the continuous profiler code to your application. To enable the profiler at run time, set the environment variable `DD_PROFILING_ENABLED=true`. 
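Putting the steps above together, a minimal v1 setup that opts into extra profile types could look like the following sketch (the service and environment names are placeholders, and block and mutex profiles add some overhead, so enable them deliberately):

```go
package main

import (
	"log"

	"gopkg.in/DataDog/dd-trace-go.v1/profiler" // for 2.x, use github.com/DataDog/dd-trace-go/v2/profiler
)

func main() {
	// WithProfileTypes specifies the full set of profiles to collect:
	// the default CPU and heap profiles are kept, and block and mutex
	// profiles are added on top of them.
	err := profiler.Start(
		profiler.WithService("my-service"), // placeholder
		profiler.WithEnv("production"),     // placeholder
		profiler.WithProfileTypes(
			profiler.CPUProfile,
			profiler.HeapProfile,
			profiler.BlockProfile,
			profiler.MutexProfile,
		),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer profiler.Stop()

	// ... run your application ...
}
```

With the v2 module, the option names stay the same; only the import path changes, as shown in the linked v2 references.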
@@ -157,4 +158,7 @@ The [Getting Started with Profiler][17] guide takes a sample service with a perf [17]: /getting_started/profiler/ [18]: /profiler/enabling/supported_versions/ [19]: https://app.datadoghq.com/account/settings/agent/latest?platform=overview -[20]: /tracing/trace_collection/automatic_instrumentation/dd_libraries/go \ No newline at end of file +[20]: /tracing/trace_collection/automatic_instrumentation/dd_libraries/go +[21]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/v2/profiler#pkg-constants +[22]:https://pkg.go.dev/github.com/DataDog/dd-trace-go/v2/profiler#WithProfileTypes +[23]:https://pkg.go.dev/github.com/DataDog/dd-trace-go/v2/profiler#ProfileType \ No newline at end of file diff --git a/content/en/profiler/guide/isolate-outliers-in-monolithic-services.md b/content/en/profiler/guide/isolate-outliers-in-monolithic-services.md index d138a18a8bb04..8523c78f38760 100644 --- a/content/en/profiler/guide/isolate-outliers-in-monolithic-services.md +++ b/content/en/profiler/guide/isolate-outliers-in-monolithic-services.md @@ -110,7 +110,7 @@ pprof.Do(ctx, pprof.Labels("customer_name", ), func(context.Context) { }) ``` -To specify which label keys you want to use for filtering, add the [WithCustomProfilerLabelKeys][2] option when starting the profiler: +To specify which label keys you want to use for filtering, add the [WithCustomProfilerLabelKeys][2] (or [WithCustomProfilerLabelKeys v2][3]) option when starting the profiler: ```go profiler.Start( @@ -123,6 +123,7 @@ Then, open CPU or goroutine profiles for your service and select the `customer_n [1]: https://pkg.go.dev/runtime/pprof#Do [2]: https://pkg.go.dev/gopkg.in/DataDog/dd-trace-go.v1/profiler#WithCustomProfilerLabelKeys +[3]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/v2/profiler#WithCustomProfilerLabelKeys {{< /programming-lang >}} {{< /programming-lang-wrapper >}} diff --git a/content/en/real_user_monitoring/browser/setup/client.md b/content/en/real_user_monitoring/browser/setup/client.md index 291161eb94c13..624da37ea2e32 100644 --- a/content/en/real_user_monitoring/browser/setup/client.md +++ b/content/en/real_user_monitoring/browser/setup/client.md @@ -21,7 +21,7 @@ The Datadog Browser SDK can be used to instrument your application for both [Rea After your applications have been manually instrumented, you can begin managing your RUM and Error Tracking configurations per application in Datadog. -The Browser SDK supports all modern desktop and mobile browsers including IE11. For more information, see the [Browser Support][3] table. +The Browser SDK supports all modern desktop and mobile browsers. For more information, see the [Browser Support][3] table. ## Setup @@ -2062,21 +2062,27 @@ Allows you to control RUM views creation. See [override default RUM view names][ `trackUserInteractions` : Optional
**Type**: Boolean
-**Default**: `false`
+**Default**: `true`
Enables [automatic collection of users actions][6]. `trackResources` : Optional
**Type**: Boolean
-**Default**: `false`
+**Default**: `true`
Enables collection of resource events. `trackLongTasks` : Optional
**Type**: Boolean
-**Default**: `false`
+**Default**: `true`
Enables collection of long task events. +`trackAnonymousUser` +: Optional
+**Type**: Boolean
+**Default**: `true`
+Enables collection of anonymous user id across sessions. + `defaultPrivacyLevel` : Optional
**Type**: String
@@ -2170,6 +2176,12 @@ Allow capture of [untrusted events][18], for example in automated UI tests. Options that must have matching configuration when you are using the Logs Browser SDK: +`sessionPersistence` +: Optional
+**Type**: `"cookie" | "local-storage"`
+**Default**: `"cookie"`
+Which storage strategy to use for persisting sessions. Can be either `cookie` or `local-storage`. + `trackSessionAcrossSubdomains` : Optional
**Type**: Boolean
@@ -2188,17 +2200,11 @@ Use a secure session cookie. This disables RUM events sent on insecure (non-HTTP **Default**:`false`
Use a partitioned secure cross-site session cookie. This allows the RUM Browser SDK to run when the site is loaded from another one (iframe). Implies `useSecureSessionCookie`. -`useCrossSiteSessionCookie` -: Optional - **Deprecated**
-**Type**: Boolean
-**Default**:`false`
-See `usePartitionedCrossSiteSessionCookie`. - `allowFallbackToLocalStorage` -: Optional
+: Optional - **Deprecated**
**Type**: Boolean
**Default**: `false`
-Allows the use of `localStorage` when cookies cannot be set. This enables the RUM Browser SDK to run in environments that do not provide cookie support. See [Monitor Electron Applications Using the Browser SDK][19] for a typical use-case. +Use `sessionPersistence` instead. [1]: /account_management/api-app-keys/#client-tokens [2]: /getting_started/site/ @@ -2343,6 +2349,12 @@ Allow capture of [untrusted events][12], for example in automated UI tests. Options that must have matching configuration when you are using the Logs Browser SDK: +`sessionPersistence` +: Optional
+**Type**: `"cookie" | "local-storage"`
+**Default**: `"cookie"`
+Which storage strategy to use for persisting sessions. Can be either `cookie` or `local-storage`. + `trackSessionAcrossSubdomains` : Optional
**Type**: Boolean
@@ -2361,17 +2373,11 @@ Use a secure session cookie. This disables events sent on insecure (non-HTTPS) c **Default**:`false`
Use a partitioned secure cross-site session cookie. This allows the Browser SDK to run when the site is loaded from another one (iframe). Implies `useSecureSessionCookie`. -`useCrossSiteSessionCookie` -: Optional - **Deprecated**
-**Type**: Boolean
-**Default**:`false`
-See `usePartitionedCrossSiteSessionCookie`. - `allowFallbackToLocalStorage` -: Optional
+: Optional - **Deprecated**
**Type**: Boolean
**Default**: `false`
-Allows the use of `localStorage` when cookies cannot be set. This enables the Browser SDK to run in environments that do not provide cookie support. See [Monitor Electron Applications Using the Browser SDK][13] for a typical use case. +Use `sessionPersistence` instead. [1]: /account_management/api-app-keys/#client-tokens [2]: /getting_started/site/ diff --git a/content/en/real_user_monitoring/error_tracking/mobile/expo.md b/content/en/real_user_monitoring/error_tracking/mobile/expo.md index e2e8eb27986ed..c4b26aadf3e99 100644 --- a/content/en/real_user_monitoring/error_tracking/mobile/expo.md +++ b/content/en/real_user_monitoring/error_tracking/mobile/expo.md @@ -86,12 +86,7 @@ See the [RUM Debug Symbols][4] page to view all uploaded symbols. ## Limitations -{{< site-region region="us,us3,us5,eu,gov" >}} -Source maps, mapping files, and dSYM files are limited to **500** MB each. -{{< /site-region >}} -{{< site-region region="ap1" >}} -Source maps, mapping files, and dSYM files are limited to **500** MB each. -{{< /site-region >}} +Source maps and mapping files are limited in size to **500 MB** each, while dSYM files can go up to **2 GB** each. ## Test your implementation diff --git a/content/en/real_user_monitoring/error_tracking/mobile/flutter.md b/content/en/real_user_monitoring/error_tracking/mobile/flutter.md index ba5a8973bd094..4e1e7aa3ee5a5 100644 --- a/content/en/real_user_monitoring/error_tracking/mobile/flutter.md +++ b/content/en/real_user_monitoring/error_tracking/mobile/flutter.md @@ -113,12 +113,7 @@ See the [RUM Debug Symbols][10] page to view all uploaded symbols. ## Limitations -{{< site-region region="us,us3,us5,eu,gov" >}} -Source maps and dSYM files are limited to **500** MB each. -{{< /site-region >}} -{{< site-region region="ap1" >}} -Source maps and dSYM files are limited to **500** MB each. -{{< /site-region >}} +Mapping files are limited in size to **500 MB** each, while dSYM files can go up to **2 GB** each. ## Test your implementation diff --git a/content/en/real_user_monitoring/error_tracking/mobile/ios.md b/content/en/real_user_monitoring/error_tracking/mobile/ios.md index fa4a8628da31f..1e83096943f38 100644 --- a/content/en/real_user_monitoring/error_tracking/mobile/ios.md +++ b/content/en/real_user_monitoring/error_tracking/mobile/ios.md @@ -443,12 +443,8 @@ For more information, see [dSYMs commands][11]. ## Limitations -{{< site-region region="us,us3,us5,eu,gov" >}} -dSYM files are limited to **500** MB. -{{< /site-region >}} -{{< site-region region="ap1" >}} -dSYM files are limited to **500** MB. -{{< /site-region >}} +dSYM files are limited in size to **2 GB** each. + ## Test your implementation diff --git a/content/en/real_user_monitoring/error_tracking/mobile/kotlin-multiplatform.md b/content/en/real_user_monitoring/error_tracking/mobile/kotlin-multiplatform.md index 975510b9b5cd4..768c17e94d038 100644 --- a/content/en/real_user_monitoring/error_tracking/mobile/kotlin-multiplatform.md +++ b/content/en/real_user_monitoring/error_tracking/mobile/kotlin-multiplatform.md @@ -93,7 +93,7 @@ Use the following guides to see how you can upload mapping files (Android) or dS ### File sizing -Individual debug symbols (mapping files and dSYMs) are limited to 500 MB each. +Mapping files are limited in size to **500 MB** each, while dSYM files can go up to **2 GB** each. 
### Collection diff --git a/content/en/real_user_monitoring/error_tracking/mobile/reactnative.md b/content/en/real_user_monitoring/error_tracking/mobile/reactnative.md index 75302c6d77399..d31a5a1aacbb4 100644 --- a/content/en/real_user_monitoring/error_tracking/mobile/reactnative.md +++ b/content/en/real_user_monitoring/error_tracking/mobile/reactnative.md @@ -96,12 +96,7 @@ See the [RUM Debug Symbols][16] page to view all uploaded symbols. ## Limitations -{{< site-region region="us,us3,us5,eu,gov" >}} -Source maps, mapping files, and dSYM files are limited to **500** MB each. -{{< /site-region >}} -{{< site-region region="ap1" >}} -Source maps, mapping files, and dSYM files are limited to **500** MB each. -{{< /site-region >}} +Source maps and mapping files are limited in size to **500 MB** each, while dSYM files can go up to **2 GB** each. To compute the size of your source maps and bundle, run the following command: diff --git a/content/en/real_user_monitoring/guide/browser-sdk-upgrade.md b/content/en/real_user_monitoring/guide/browser-sdk-upgrade.md index 1aa5575e536b0..8652c5199f72a 100644 --- a/content/en/real_user_monitoring/guide/browser-sdk-upgrade.md +++ b/content/en/real_user_monitoring/guide/browser-sdk-upgrade.md @@ -14,6 +14,74 @@ further_reading: Follow this guide to migrate between major versions of the Browser RUM and Browser Logs SDKs. See [the SDK documentation][26] for details on its features and capabilities. +## From v5 to v6 + +The main improvement v6 offers is the bundle size reduction. By dropping support for IE11 and leveraging lazy loading, the size of the RUM bundle has been reduced by 10% and the Logs bundle by nearly 9%. +Additionally, we've changed a few default initialization parameters and prepared for future improvements. + +Take notice of the below breaking changes as you upgrade your SDK. + +### Breaking changes + +#### Browser support + +Support for IE11 and other older browsers has been discontinued. Browsers must now support at least ES2018. +To use Datadog on older browsers, you can keep using Browser SDK v5 or earlier. + +#### Add tracestate header when using tracecontext propagator + +The default `tracecontext` propagator now sends a new `tracestate` header with additional metadata that allows better attribution of your traces. If you are using this propagator, then you need to allow this new header for all traced endpoints, in addition to the existing `traceparent` header: + +``` +Access-Control-Allow-Headers: traceparent, tracestate +``` + +#### Strongly type `site` option + +The `site` option now has a stronger type definition. If you use TypeScript you might have an error if you use a non-standard value. We recommend using [proxy][27] to send RUM data to a nonstandard URL. + +#### Tracking Actions, Resources and LongTask are now enabled by default + +User interactions, resources, and long tasks are now tracked by default. This change does not impact billing. To opt-out, set `trackUserInteractions`, `trackResources`, and `trackLongTasks` [initialization parameters][28] to `false`. + +#### Collect Long Animation Frames as Long Tasks + +On supported Browsers, [Long Animation Frames][35] are now collected instead of Long Tasks. The event type in the RUM Explorer is still `long_task`, but they will contain information about the long animation frame. + +#### Increased cookies expiration date + +To support anonymous user tracking, the session cookie (`_dd_s`) expiration is extended to 1 year. 
To opt-out, set `trackAnonymousUser` [initialization parameters][28] to `false`. + +#### Removed useCrossSiteSessionCookie initialization parameter + +`useCrossSiteSessionCookie` was deprecated and is now unsupported. Use `usePartitionedCrossSiteSessionCookie` [initialization parameters][28] instead. + +#### Lazy load Session Replay + +Session Replay module is now lazy-loaded using [dynamic imports][30]. This loads the module only for sessions sampled for Session Replay, reducing the bundle size for others. + +**If you're using the SDK through NPM**, ensure your bundler supports dynamic imports. Most modern bundlers support this feature out of the box, but some may require configuration changes. Refer to your bundler's documentation for guidance: [Webpack][31], [Esbuild][32], [Rollup][33], [Parcel][34]. + +**If you're using the SDK through a CDN**, there are no breaking changes. However, note that in addition to the main script being loaded (for example, +`datadog-rum.js`), the SDK will dynamically load an additional chunk when needed (for example, +`recorder-d7628536637b074ddc3b-datadog-rum.js`). + +#### Do not inject trace context for non-sampled traces + +The default value for the `traceContextInjection` initialization parameter has been updated to `sampled` to ensure backend services' sampling decisions are applied when traces are not sampled in the Browser SDK. See the [Connect RUM and Traces documentation][29] for more information. + +**Note**: If you're using a `traceSampleRate` of 100% (default), this change does not have any impact for you. + + + +### Future breaking changes + +#### Enabling compression for Datadog intake requests + +Compression for Datadog intake requests will be enabled by default in a future major version. +Datadog recommends that you opt-in to compression now using the `compressIntakeRequest` [initialization parameter][28]. +Since compression is performed in a Worker thread, configuring the Content Security Policy is necessary. See [CSP guidelines][18] for more information. + ## From v4 to v5 V5 introduces the following changes and more: @@ -337,3 +405,12 @@ The RUM Browser SDK no longer lets you specify the source of an error collected [24]: /help/ [26]: /real_user_monitoring/browser/ [25]: /real_user_monitoring/platform/connect_rum_and_traces#opentelemetry-support +[27]: /real_user_monitoring/guide/proxy-rum-data +[28]: /real_user_monitoring/browser/setup/#initialization-parameters +[29]: /real_user_monitoring/platform/connect_rum_and_traces/?tab=browserrum#:~:text=configure%20the%20traceContextInjection +[30]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/import +[31]: https://webpack.js.org/guides/code-splitting/#dynamic-imports +[32]: https://esbuild.github.io/api/#splitting +[33]: https://rollupjs.org/tutorial/#code-splitting +[34]: https://parceljs.org/features/code-splitting +[35]: https://developer.chrome.com/docs/web-platform/long-animation-frames#long-frames-api diff --git a/content/en/real_user_monitoring/guide/debug-symbols.md b/content/en/real_user_monitoring/guide/debug-symbols.md index ccf5b9c385b77..323af8220bbe7 100644 --- a/content/en/real_user_monitoring/guide/debug-symbols.md +++ b/content/en/real_user_monitoring/guide/debug-symbols.md @@ -22,6 +22,11 @@ Use the [RUM Debug Symbols page][1] to see if there are debug symbols for your a If there are no debug symbols for your application, [upload them][2]. +
+Ensure that the size of each debug symbol does not exceed the limit of **500 MB**, otherwise the upload is rejected. +For iOS dSYMs, individual files up to **2 GB** are supported. +
+ ### Debug symbol tags do not match Datadog relies on different tags to match debug symbols with stack traces. These tags vary for each type of application: diff --git a/content/en/real_user_monitoring/guide/monitor-capacitor-applications-using-browser-sdk.md b/content/en/real_user_monitoring/guide/monitor-capacitor-applications-using-browser-sdk.md index ce43b256da252..9eede49ccdfe7 100644 --- a/content/en/real_user_monitoring/guide/monitor-capacitor-applications-using-browser-sdk.md +++ b/content/en/real_user_monitoring/guide/monitor-capacitor-applications-using-browser-sdk.md @@ -20,7 +20,7 @@ You can install and configure the Datadog Browser SDK to start monitoring applic To install the Datadog Browser SDK to support Capacitor apps: 1. Set up and install [RUM Browser Monitoring][3], following the steps for CDN sync, CDN async, or npm. -2. Set the `allowFallbackToLocalStorage` parameter to `true` in the RUM initialization configuration. +2. Set the `sessionPersistence` parameter to `"local-storage"` in the RUM initialization configuration. **Note**: This setting allows Datadog to collect RUM data without relying on browser cookies. @@ -30,7 +30,7 @@ To install the Datadog Browser SDK to support Capacitor apps: clientToken: '', site: '', ... - allowFallbackToLocalStorage: true + sessionPersistence: "local-storage" }); ``` @@ -56,4 +56,4 @@ This means that any application using Capacitor to embed a landing page, then la [1]: https://capacitorjs.com/ [2]: /real_user_monitoring/browser/setup/ -[3]: /real_user_monitoring/explorer/ \ No newline at end of file +[3]: /real_user_monitoring/explorer/ diff --git a/content/en/real_user_monitoring/guide/monitor-electron-applications-using-browser-sdk.md b/content/en/real_user_monitoring/guide/monitor-electron-applications-using-browser-sdk.md index d83b5938dc5bd..a6262692d1a20 100644 --- a/content/en/real_user_monitoring/guide/monitor-electron-applications-using-browser-sdk.md +++ b/content/en/real_user_monitoring/guide/monitor-electron-applications-using-browser-sdk.md @@ -24,7 +24,7 @@ To install the Datadog Browser SDK to support Electron apps: 1. Set up and install [RUM Browser Monitoring][2] inside **every renderer process**, following the steps for CDN sync, CDN async, or npm. -2. Set the `allowFallbackToLocalStorage` parameter to `true` in the RUM initialization configuration of each renderer process, as shown below. +2. Set the `sessionPersistence` parameter to `"local-storage"` in the RUM initialization configuration of each renderer process, as shown below. **Note**: This setting allows Datadog to collect RUM data without relying on browser cookies. @@ -37,7 +37,7 @@ To install the Datadog Browser SDK to support Electron apps: clientToken: '', site: '', ... - allowFallbackToLocalStorage: true + sessionPersistence: "local-storage" }); ``` diff --git a/content/en/real_user_monitoring/guide/upload-javascript-source-maps.md b/content/en/real_user_monitoring/guide/upload-javascript-source-maps.md index ef4e65c046abe..bc930b4dc22e4 100644 --- a/content/en/real_user_monitoring/guide/upload-javascript-source-maps.md +++ b/content/en/real_user_monitoring/guide/upload-javascript-source-maps.md @@ -24,12 +24,7 @@ If your front-end JavaScript source code is minified, upload your source maps to Configure your JavaScript bundler such that when minifying your source code, it generates source maps that directly include the related source code in the `sourcesContent` attribute.
-{{< site-region region="us,us3,us5,eu" >}} -Ensure that the size of each source map augmented with the size of the related minified file does not exceed the limit of **300** MB. -{{< /site-region >}} -{{< site-region region="ap1,gov" >}} -Ensure that the size of each source map augmented with the size of the related minified file does not exceed the limit of **50** MB. -{{< /site-region >}} +Ensure that the size of each source map augmented with the size of the related minified file does not exceed the limit of **500 MB**.
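For instance, with webpack — one of the bundlers covered below — a production build that emits source maps embedding `sourcesContent` can be sketched as follows (the entry and output names are illustrative placeholders, not official guidance):

```javascript
// webpack.config.js — minimal sketch; entry and output names are placeholders.
// devtool: 'source-map' writes .js.map files that include the original
// sources in the sourcesContent attribute.
module.exports = {
  mode: 'production',
  entry: './src/index.js',
  output: { filename: '[name].[contenthash].min.js' },
  devtool: 'source-map',
};
```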
See the following configurations for popular JavaScript bundlers. @@ -88,12 +83,7 @@ See the following example: ```
-{{< site-region region="us,us3,us5,eu" >}} -If the sum of the file size for javascript.364758.min.js and javascript.364758.js.map exceeds the the **300** MB limit, reduce it by configuring your bundler to split the source code into multiple smaller chunks. For more information, see Code Splitting with WebpackJS. -{{< /site-region >}} -{{< site-region region="ap1,gov" >}} -If the sum of the file size for javascript.364758.min.js and javascript.364758.js.map exceeds the the **50** MB limit, reduce it by configuring your bundler to split the source code into multiple smaller chunks. For more information, see Code Splitting with WebpackJS. -{{< /site-region >}} +If the sum of the file size for javascript.364758.min.js and javascript.364758.js.map exceeds the 500 MB limit, reduce it by configuring your bundler to split the source code into multiple smaller chunks. For more information, see Code Splitting with WebpackJS.
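As a rough sketch of that approach with webpack (the size value below is illustrative; tune it for your build):

```javascript
// webpack.config.js — illustrative chunk-splitting sketch.
// maxSize is a hint in bytes: webpack tries to split chunks that grow beyond it,
// which keeps each minified file and its source map well under the intake limit.
module.exports = {
  optimization: {
    splitChunks: {
      chunks: 'all',
      maxSize: 4 * 1024 * 1024, // roughly 4 MB per chunk
    },
  },
};
```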
## Upload your source maps diff --git a/content/en/real_user_monitoring/mobile_and_tv_monitoring/android/advanced_configuration.md b/content/en/real_user_monitoring/mobile_and_tv_monitoring/android/advanced_configuration.md index ee273dd3e650b..28ad2527c6e73 100644 --- a/content/en/real_user_monitoring/mobile_and_tv_monitoring/android/advanced_configuration.md +++ b/content/en/real_user_monitoring/mobile_and_tv_monitoring/android/advanced_configuration.md @@ -49,6 +49,36 @@ In addition to [tracking views automatically][4], you can also track specific di {{% /tab %}} {{< /tabs >}} +### Notify the SDK that your view finished loading + +RUM tracks the time it takes for your view to load. To notify the SDK that your view has finished loading, call the `addViewLoadingTime(override=)` method +through the `GlobalRumMonitor` instance. Call this method when your view is fully loaded and displayed to the user: + +{{< tabs >}} +{{% tab "Kotlin" %}} + ```kotlin + @OptIn(ExperimentalRumApi::class) + fun onViewLoaded() { + GlobalRumMonitor.get().addViewLoadingTime(override = false) + } + ``` +{{% /tab %}} +{{% tab "Java" %}} + ```java + @OptIn(markerClass = ExperimentalRumApi.class) + public void onViewLoaded() { + GlobalRumMonitor.get().addViewLoadingTime(false); + } + ``` +{{% /tab %}} +{{< /tabs >}} + +Use the `override` option to replace the previously calculated loading time for the current view. + +After the loading time is sent, it is accessible as `@view.loading_time` and is visible in the RUM UI. + +**Note**: This API is still experimental and might change in the future. + ### Add your own performance timing In addition to RUM's default attributes, you can measure where your application is spending its time by using the `addTiming` API. The timing measure is relative to the start of the current RUM view. For example, you can time how long it takes for your hero image to appear: diff --git a/content/en/real_user_monitoring/mobile_and_tv_monitoring/android/error_tracking.md b/content/en/real_user_monitoring/mobile_and_tv_monitoring/android/error_tracking.md index a79986ca0e774..c36bf34dd4170 100644 --- a/content/en/real_user_monitoring/mobile_and_tv_monitoring/android/error_tracking.md +++ b/content/en/real_user_monitoring/mobile_and_tv_monitoring/android/error_tracking.md @@ -223,12 +223,7 @@ tasks["minify${variant}WithR8"].finalizedBy { tasks["uploadMapping${variant}"] } ## Limitations ### File sizing -{{< site-region region="us,us3,us5,eu,gov" >}} -Mapping files are limited to **500** MB. If your project has a mapping file larger than this, use one of the following options to reduce the file size: -{{< /site-region >}} -{{< site-region region="ap1" >}} -Mapping files are limited to **50** MB. If your project has a mapping file larger than this, use one of the following options to reduce the file size: -{{< /site-region >}} +Mapping files are limited in size to **500 MB** each. If your project has a mapping file larger than this, use one of the following options to reduce the file size: - Set the `mappingFileTrimIndents` option to `true`. This reduces your file size by 5%, on average. - Set a map of `mappingFilePackagesAliases`: This replaces package names with shorter aliases. **Note**: Datadog's stacktrace uses the same alias instead of the original package name, so it's better to use this option for third-party dependencies. A configuration sketch showing both options follows below. 
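A sketch of what those two options can look like in a Gradle Kotlin DSL build script, assuming the Datadog Android Gradle plugin is applied and is configured through its `datadog` extension (the alias values are invented examples):

```kotlin
// build.gradle.kts — sketch only; the package aliases below are made-up examples.
datadog {
    // Trims leading indentation from the mapping file (~5% smaller on average).
    mappingFileTrimIndents = true
    // Replaces long third-party package names with short aliases in the mapping file.
    mappingFilePackagesAliases = mapOf(
        "androidx.appcompat" to "axapp",
        "com.squareup.okhttp3" to "okhttp3"
    )
}
```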
diff --git a/content/en/real_user_monitoring/mobile_and_tv_monitoring/flutter/advanced_configuration.md b/content/en/real_user_monitoring/mobile_and_tv_monitoring/flutter/advanced_configuration.md index 432d275859440..06dc64414cfcd 100644 --- a/content/en/real_user_monitoring/mobile_and_tv_monitoring/flutter/advanced_configuration.md +++ b/content/en/real_user_monitoring/mobile_and_tv_monitoring/flutter/advanced_configuration.md @@ -179,14 +179,46 @@ A custom endpoint for sending RUM data. **Default**: `20.0` The sampling rate for telemetry data, such as errors and debug logs. -## Automatic view tracking +## Automatically track resources -If you are using Flutter Navigator v2.0, your setup for automatic view tracking differs depending on your routing middleware. See [Flutter Integrated Libraries][18] for instructions on how to integrate with [go_router][8], [AutoRoute][9], and [Beamer][10]. +Use the [Datadog Tracking HTTP Client][12] package to enable automatic tracking of resources and HTTP calls from your views. + +Add the package to your `pubspec.yaml` and add the following to your initialization file: + +```dart +final configuration = DatadogConfiguration( + // configuration + firstPartyHosts: ['example.com'], +)..enableHttpTracking() +``` + +**Note**: The Datadog Tracking HTTP Client modifies [`HttpOverrides.global`][27]. If you are using your own custom `HttpOverrides`, you may need to inherit from [`DatadogHttpOverrides`][28]. In this case, you do not need to call `enableHttpTracking`. Versions of `datadog_tracking_http_client` >= 1.3 check the value of `HttpOverrides.current` and use this for client creation, so you only need to make sure to initialize `HttpOverrides.global` prior to initializing Datadog. + +In order to enable Datadog [Distributed Tracing][29], you must set the `DatadogConfiguration.firstPartyHosts` property in your configuration object to a domain that supports distributed tracing. You can also modify the sampling rate for distributed tracing by setting the `tracingSamplingRate` on your `DatadogRumConfiguration`. + +- `firstPartyHosts` does not allow wildcards, but matches any subdomains for a given domain. For example, `api.example.com` matches `staging.api.example.com` and `prod.api.example.com`, not `news.example.com`. + +- `DatadogRumConfiguration.traceSampleRate` sets a default sampling rate of 20%. If you want all resource requests to generate a full distributed trace, set this value to `100.0`. ## Enrich user sessions Flutter RUM automatically tracks attributes such as user activity, views (using the `DatadogNavigationObserver`), errors, native crashes, and network requests (using the Datadog Tracking HTTP Client). See the [RUM Data Collection documentation][3] to learn about the RUM events and default attributes. You can further enrich user session information and gain finer control over the attributes collected by tracking custom events. +### Notify the SDK that your view finished loading + +RUM tracks the time it takes for your view to load. To notify the SDK that your view has finished loading, call the `addViewLoadingTime` method on `DatadogRum`. +Call this method when your view is fully loaded and ready to be displayed to the user: + +```dart + DatadogSdk.instance.rum?.addViewLoadingTime(override); +``` + +Use the `override` option to replace the previously calculated loading time for the current view. + +After the loading time is sent, it is accessible as `@view.loading_time` and is visible in the RUM UI. 
+ +**Note**: This API is still experimental and might change in the future. + ### Add your own performance timing In addition to RUM's default attributes, you can measure where your application is spending its time by using `DdRum.addTiming`. The timing measure is relative to the start of the current RUM view. @@ -457,4 +489,9 @@ if (DatadogSdk.instance.isFirstPartyHost(host)){ [21]: /real_user_monitoring/connect_rum_and_traces/?tab=browserrum#how-are-rum-resources-linked-to-traces [22]: https://github.com/openzipkin/b3-propagation#single-headers [23]: https://github.com/openzipkin/b3-propagation#multiple-headers -[24]: https://www.w3.org/TR/trace-context/#tracestate-header \ No newline at end of file +[24]: https://www.w3.org/TR/trace-context/#tracestate-header +[25]: https://pub.dev/packages/go_router +[26]: https://pub.dev/documentation/datadog_flutter_plugin/latest/datadog_flutter_plugin/ViewInfoExtractor.html +[27]: https://api.flutter.dev/flutter/dart-io/HttpOverrides/current.html +[28]: https://pub.dev/documentation/datadog_tracking_http_client/latest/datadog_tracking_http_client/DatadogTrackingHttpOverrides-class.html +[29]: /serverless/aws_lambda/distributed_tracing/ \ No newline at end of file diff --git a/content/en/real_user_monitoring/mobile_and_tv_monitoring/flutter/setup.md b/content/en/real_user_monitoring/mobile_and_tv_monitoring/flutter/setup.md index e574196e514e4..8ab7d1cf24e87 100644 --- a/content/en/real_user_monitoring/mobile_and_tv_monitoring/flutter/setup.md +++ b/content/en/real_user_monitoring/mobile_and_tv_monitoring/flutter/setup.md @@ -166,7 +166,7 @@ This loads the CDN-delivered Datadog Browser SDKs for Logs and RUM. The synchron For more information on available configuration options, see the [DatadogConfiguration object documentation][4]. -To ensure the safety of your data, you must use a client token. You cannot use Datadog API keys to configure the Datadog Flutter Plugin. +To ensure the safety of your data, you must use a client token. You cannot use Datadog API keys to configure the Datadog [Flutter Plugin][5]. - If you are using RUM, set up a **Client Token** and **Application ID**. - If you are only using Logs, initialize the library with a client token. @@ -176,7 +176,7 @@ To ensure the safety of your data, you must use a client token. You cannot use D You can initialize the library using one of two methods in your `main.dart` file. -- Use `DatadogSdk.runApp` to automatically set up [Error Tracking][5]. +- Use `DatadogSdk.runApp` to automatically set up [Error Tracking][6]. ```dart await DatadogSdk.runApp(configuration, TrackingConsent.granted, () async { @@ -184,7 +184,7 @@ You can initialize the library using one of two methods in your `main.dart` file }) ``` -- You can also manually set up [Error Tracking][5] and resource tracking. `DatadogSdk.runApp` calls `WidgetsFlutterBinding.ensureInitialized`, so if you are not using `DatadogSdk.runApp`, you need to call this method prior to calling `DatadogSdk.instance.initialize`. +- You can also manually set up [Error Tracking][6] and resource tracking. `DatadogSdk.runApp` calls `WidgetsFlutterBinding.ensureInitialized`, so if you are not using `DatadogSdk.runApp`, you need to call this method prior to calling `DatadogSdk.instance.initialize`. ```dart WidgetsFlutterBinding.ensureInitialized(); @@ -243,6 +243,8 @@ The SDK changes its behavior according to the new value. 
For example, if the cur ## Automatically track views +If you are using Flutter Navigator v2.0, your setup for automatic view tracking differs depending on your routing middleware. See [Flutter Integrated Libraries][12] for instructions on how to integrate with [go_router][7], [AutoRoute][9], and [Beamer][10]. + ### Flutter Navigator v1 The Datadog Flutter Plugin can automatically track named routes using the `DatadogNavigationObserver` on your MaterialApp: @@ -262,7 +264,7 @@ If you are not using named routes, you can use `DatadogRouteAwareMixin` in conju ### Flutter Navigator v2 -If you are using Flutter Navigator v2.0, which uses the `MaterialApp.router` named constructor, the setup varies based on the routing middleware you are using, if any. Since [`go_router`][6] uses the same observer interface as Flutter Navigator v1, `DatadogNavigationObserver` can be added to other observers as a parameter to `GoRouter`. +If you are using Flutter Navigator v2.0, which uses the `MaterialApp.router` named constructor, the setup varies based on the routing middleware you are using, if any. Since [`go_router`][7] uses the same observer interface as Flutter Navigator v1, `DatadogNavigationObserver` can be added to other observers as a parameter to `GoRouter`. ```dart final _router = GoRouter( @@ -279,11 +281,12 @@ MaterialApp.router( ) ``` -For examples that use routers other than `go_router`, see [Advanced Configuration - Automatic View Tracking][7]. +For examples that use routers other than `go_router`, see [Automatically track views](#automatically-track-views). + ### Renaming Views -For all setups, you can rename views or supply custom paths by providing a [`viewInfoExtractor`][8] callback. This function can fall back to the default behavior of the observer by calling `defaultViewInfoExtractor`. For example: +For all setups, you can rename views or supply custom paths by providing a [`viewInfoExtractor`][11] callback. This function can fall back to the default behavior of the observer by calling `defaultViewInfoExtractor`. For example: ```dart RumViewInfo? infoExtractor(Route route) { @@ -304,28 +307,6 @@ var observer = DatadogNavigationObserver( ); ``` -## Automatically track resources - -Use the [Datadog Tracking HTTP Client][9] package to enable automatic tracking of resources and HTTP calls from your views. - -Add the package to your `pubspec.yaml` and add the following to your initialization file: - -```dart -final configuration = DatadogConfiguration( - // configuration - firstPartyHosts: ['example.com'], -)..enableHttpTracking() -``` - -**Note**: The Datadog Tracking HTTP Client modifies [`HttpOverrides.global`][10]. If you are using your own custom `HttpOverrides`, you may need to inherit from [`DatadogHttpOverrides`][11]. In this case, you do not need to call `enableHttpTracking`. Versions of `datadog_tracking_http_client` >= 1.3 check the value of `HttpOverrides.current` and use this for client creation, so you only need to make sure to initialize `HttpOverrides.global` prior to initializing Datadog. - -In order to enable Datadog [Distributed Tracing][12], you must set the `DatadogConfiguration.firstPartyHosts` property in your configuration object to a domain that supports distributed tracing. You can also modify the sampling rate for distributed tracing by setting the `tracingSamplingRate` on your `DatadogRumConfiguration`. - -- `firstPartyHosts` does not allow wildcards, but matches any subdomains for a given domain. 
For example, `api.example.com` matches `staging.api.example.com` and `prod.api.example.com`, not `news.example.com`. - -- `DatadogRumConfiguration.traceSampleRate` sets a default sampling rate of 20%. If you want all resources requests to generate a full distributed trace, set this value to `100.0`. - - ## Automatically track actions Use [`RumUserActionDetector`][13] to track user taps that happen in a given Widget tree: @@ -390,13 +371,13 @@ This means that even if users open your application while offline, no data is lo [2]: /error_tracking/ [3]: /account_management/api-app-keys/#client-tokens [4]: https://pub.dev/documentation/datadog_flutter_plugin/latest/datadog_flutter_plugin/DatadogConfiguration-class.html -[5]: /real_user_monitoring/error_tracking/flutter -[6]: https://pub.dev/packages/go_router -[7]: /real_user_monitoring/mobile_and_tv_monitoring/flutter/advanced_configuration/#automatic-view-tracking -[8]: https://pub.dev/documentation/datadog_flutter_plugin/latest/datadog_flutter_plugin/ViewInfoExtractor.html -[9]: https://pub.dev/packages/datadog_tracking_http_client -[10]: https://api.flutter.dev/flutter/dart-io/HttpOverrides/current.html -[11]: https://pub.dev/documentation/datadog_tracking_http_client/latest/datadog_tracking_http_client/DatadogTrackingHttpOverrides-class.html -[12]: /serverless/distributed_tracing -[13]: https://pub.dev/documentation/datadog_flutter_plugin/latest/datadog_flutter_plugin/RumUserActionDetector-class.html -[14]: https://pub.dev/documentation/datadog_flutter_plugin/latest/datadog_flutter_plugin/RumUserActionAnnotation-class.html +[5]: https://pub.dev/documentation/datadog_flutter_plugin/latest/datadog_flutter_plugin/ViewInfoExtractor.html +[6]: /real_user_monitoring/error_tracking/flutter +[7]: https://pub.dev/packages?q=go_router +[8]: /real_user_monitoring/mobile_and_tv_monitoring/flutter/advanced_configuration/#automatic-view-tracking +[9]: https://pub.dev/packages/auto_route +[10]: https://pub.dev/packages/beamer +[11]: https://pub.dev/documentation/datadog_flutter_plugin/latest/datadog_flutter_plugin/ViewInfoExtractor.html +[12]: /real_user_monitoring/mobile_and_tv_monitoring/flutter/integrated_libraries/ +[13]: https://pub.dev/documentation/datadog_flutter_plugin/latest/datadog_flutter_plugin/RumUserActionAnnotation-class.html +[14]: https://pub.dev/documentation/datadog_flutter_plugin/latest/datadog_flutter_plugin/RumUserActionDetector-class.html \ No newline at end of file diff --git a/content/en/real_user_monitoring/mobile_and_tv_monitoring/ios/advanced_configuration.md b/content/en/real_user_monitoring/mobile_and_tv_monitoring/ios/advanced_configuration.md index 6b2b3fa10ebb1..b94dda5a2f242 100644 --- a/content/en/real_user_monitoring/mobile_and_tv_monitoring/ios/advanced_configuration.md +++ b/content/en/real_user_monitoring/mobile_and_tv_monitoring/ios/advanced_configuration.md @@ -74,6 +74,38 @@ DDRUMMonitor *rum = [DDRUMMonitor shared]; For more details and available options, filter the [relevant file on GitHub][9] for the `DDRUMMonitor` class. +### Notify the SDK that your view finished loading + +iOS RUM tracks the time it takes for your view to load. To notify the SDK that your view has finished loading, call the `addViewLoadingTime(override:)` method +through the `RUMMonitor` instance. 
Call this method when your view is fully loaded and displayed to the user: + +{{< tabs >}} +{{% tab "Swift" %}} +```swift +@_spi(Experimental) +import DatadogRUM + +func onHeroImageLoaded() { + let rum = RUMMonitor.shared() + rum.addViewLoadingTime(override: false) +} +``` +{{% /tab %}} +{{% tab "Objective-C" %}} +```objective-c +- (void)onHeroImageLoad { + [[DDRUMMonitor shared] addViewLoadingTimeWithOverride:NO | YES]; +} +``` +{{% /tab %}} +{{< /tabs >}} + +Use the `override` option to replace the previously calculated loading time for the current view. + +After the loading time is sent, it is accessible as `@view.loading_time` and is visible in the RUM UI. + +**Note**: This API is still experimental and might change in the future. + ### Add your own performance timing In addition to RUM's default attributes, you can measure where your application is spending its time by using the `addTiming(name:)` API. The timing measure is relative to the start of the current RUM view. @@ -262,6 +294,26 @@ Datadog.setUserInfo(id: "1234", name: "John Doe", email: "john@doe.com") {{% /tab %}} {{< /tabs >}} +## Track background events + +

+Tracking background events may lead to additional sessions, which can impact billing. For questions, contact Datadog support.
+
+ +You can track events such as crashes and network requests when your application is in the background (for example, no active view is available). + +To track background events, add the following snippet during initialization in your Datadog configuration: + +```swift +import DatadogRUM + +RUM.enable( + with: RUM.Configuration( + ... + trackBackgroundEvents: true + ) +) +``` + ## Initialization Parameters You can use the following properties in `Datadog.Configuration` when creating the Datadog configuration to initialize the library: diff --git a/content/en/real_user_monitoring/mobile_and_tv_monitoring/ios/error_tracking.md b/content/en/real_user_monitoring/mobile_and_tv_monitoring/ios/error_tracking.md index 76a96ec14c280..58cf3ebe7a093 100644 --- a/content/en/real_user_monitoring/mobile_and_tv_monitoring/ios/error_tracking.md +++ b/content/en/real_user_monitoring/mobile_and_tv_monitoring/ios/error_tracking.md @@ -278,12 +278,7 @@ For more information, see [dSYMs commands][11]. ## Limitations -{{< site-region region="us,us3,us5,eu,gov" >}} -dSYM files are limited to **500** MB. -{{< /site-region >}} -{{< site-region region="ap1" >}} -dSYM files are limited to **500** MB. -{{< /site-region >}} +dSYM files are limited in size to **2 GB** each. ## Test your implementation diff --git a/content/en/real_user_monitoring/mobile_and_tv_monitoring/ios/setup.md b/content/en/real_user_monitoring/mobile_and_tv_monitoring/ios/setup.md index 789ec4dc086f7..c3ba30d5995f6 100644 --- a/content/en/real_user_monitoring/mobile_and_tv_monitoring/ios/setup.md +++ b/content/en/real_user_monitoring/mobile_and_tv_monitoring/ios/setup.md @@ -433,7 +433,7 @@ NSURLSession *session = [NSURLSession sessionWithConfiguration:[NSURLSessionConf ### Instrument views -The Datadog iOS SDK for allows you to instrument views of `SwiftUI` applications. The instrumentation also works with hybrid `UIKit` and `SwiftUI` applications. +The Datadog iOS SDK allows you to instrument views of `SwiftUI` applications. The instrumentation also works with hybrid `UIKit` and `SwiftUI` applications. To instrument a `SwiftUI.View`, add the following method to your view declaration: @@ -475,26 +475,6 @@ struct BarView: View { } ``` -## Track background events - -

-Tracking background events may lead to additional sessions, which can impact billing. For questions, contact Datadog support.
-
- -You can track events such as crashes and network requests when your application is in the background (for example, no active view is available). - -Add the following snippet during initialization in your Datadog configuration: - -```swift -import DatadogRUM - -RUM.enable( - with: RUM.Configuration( - ... - trackBackgroundEvents: true - ) -) -``` - ## Track iOS errors [iOS Crash Reporting and Error Tracking][8] displays any issues in your application and the latest available errors. You can view error details and attributes including JSON in the [RUM Explorer][9]. @@ -509,7 +489,7 @@ This means that even if users open your application while offline, no data is lo ## Supported versions -See [Supported versions][10] for a list operating system versions and platforms that are compatible with the RUM iOS SDK. +See [Supported versions][10] for a list operating system versions and platforms that are compatible with the iOS SDK. ## Further Reading diff --git a/content/en/real_user_monitoring/mobile_and_tv_monitoring/kotlin_multiplatform/advanced_configuration.md b/content/en/real_user_monitoring/mobile_and_tv_monitoring/kotlin_multiplatform/advanced_configuration.md index b6e63dca6c3b4..ecc13f61785a4 100644 --- a/content/en/real_user_monitoring/mobile_and_tv_monitoring/kotlin_multiplatform/advanced_configuration.md +++ b/content/en/real_user_monitoring/mobile_and_tv_monitoring/kotlin_multiplatform/advanced_configuration.md @@ -158,6 +158,18 @@ GlobalRumMonitor.get().addAttribute(key, value) GlobalRumMonitor.get().removeAttribute(key) ``` +## Track background events + +You can track events such as crashes and network requests when your application is in the background (for example, no active view is available). + +Add the following snippet during RUM configuration: + +```kotlin +.trackBackgroundEvents(true) +``` +

+Tracking background events may lead to additional sessions, which can impact billing. For questions, contact Datadog support.
+
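For context, here is a minimal sketch of where this builder call typically sits, assuming the RUM configuration is created with the SDK's `RumConfiguration.Builder` and enabled with `Rum.enable` as in the Kotlin Multiplatform setup guide; the package and class names below are assumptions, so adjust them to match your installed SDK version.

```kotlin
// Assumed imports for the Kotlin Multiplatform SDK; verify against your version.
import com.datadog.kmp.rum.Rum
import com.datadog.kmp.rum.RumConfiguration

// Only trackBackgroundEvents(true) is the change described on this page;
// the surrounding builder usage is the assumed standard RUM setup.
val rumConfiguration = RumConfiguration.Builder("<RUM_APPLICATION_ID>")
    .trackBackgroundEvents(true) // report crashes and requests that happen with no active view
    .build()

Rum.enable(rumConfiguration)
```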
+ ## Initialization parameters You can use the following methods in `Configuration.Builder` when creating the Datadog configuration to initialize the library: diff --git a/content/en/real_user_monitoring/mobile_and_tv_monitoring/kotlin_multiplatform/error_tracking.md b/content/en/real_user_monitoring/mobile_and_tv_monitoring/kotlin_multiplatform/error_tracking.md index 1eb49b1724bc6..712bc8477e94e 100644 --- a/content/en/real_user_monitoring/mobile_and_tv_monitoring/kotlin_multiplatform/error_tracking.md +++ b/content/en/real_user_monitoring/mobile_and_tv_monitoring/kotlin_multiplatform/error_tracking.md @@ -90,7 +90,7 @@ Use the following guides to see how you can upload mapping files (Android) or dS ### File sizing -Individual debug symbols (mapping files and dSYMs) are limited to 500 MB each. +Mapping files are limited in size to **500 MB** each, while dSYM files can go up to **2 GB** each. ### Collection diff --git a/content/en/real_user_monitoring/mobile_and_tv_monitoring/kotlin_multiplatform/setup.md b/content/en/real_user_monitoring/mobile_and_tv_monitoring/kotlin_multiplatform/setup.md index f9bac092b9068..a0751b02f9934 100644 --- a/content/en/real_user_monitoring/mobile_and_tv_monitoring/kotlin_multiplatform/setup.md +++ b/content/en/real_user_monitoring/mobile_and_tv_monitoring/kotlin_multiplatform/setup.md @@ -378,18 +378,6 @@ val ktorClient = HttpClient { This records each request processed by the `HttpClient` as a resource in RUM, with all the relevant information automatically filled (URL, method, status code, and error). Only the network requests that started when a view is active are tracked. To track requests when your application is in the background, [create a view manually][11] or enable [background view tracking](#track-background-events). -## Track background events - -You can track events such as crashes and network requests when your application is in the background (for example, no active view is available). - -Add the following snippet during RUM configuration: - -```kotlin -.trackBackgroundEvents(true) -``` -

-Tracking background events may lead to additional sessions, which can impact billing. For questions, contact Datadog support.
-
- ## Track errors [Kotlin Multiplatform Crash Reporting and Error Tracking][12] displays any issues in your application and the latest available errors. You can view error details and attributes including JSON in the [RUM Explorer][13]. diff --git a/content/en/real_user_monitoring/mobile_and_tv_monitoring/react_native/advanced_configuration.md b/content/en/real_user_monitoring/mobile_and_tv_monitoring/react_native/advanced_configuration.md index b231357f7905e..25399afd5a0ba 100644 --- a/content/en/real_user_monitoring/mobile_and_tv_monitoring/react_native/advanced_configuration.md +++ b/content/en/real_user_monitoring/mobile_and_tv_monitoring/react_native/advanced_configuration.md @@ -25,7 +25,7 @@ If you have not set up the SDK yet, follow the [in-app setup instructions][1] or Testing apps using `'@datadog/mobile-react-native'` might require completing extra steps, since Native Modules do not exist in testing environments. -Datadog provides mocks for the `'@datadog/mobile-react-native'` package. To use them with [Jest][4], add the following in your Jest setup file: +Datadog provides mocks for the `'@datadog/mobile-react-native'` package. To use them with [Jest][3], add the following in your Jest setup file: ```javascript jest.mock('@datadog/mobile-react-native', () => { @@ -57,12 +57,12 @@ You can specify the following parameters in your configuration when initializing `clientToken` : Required
**Type**: String
-A [Datadog client token][8]. +A [Datadog client token][4]. `env` : Required
**Type**: String
-The application's environment, for example: prod, pre-prod, and staging. Follows the [tag syntax requirements][15]. +The application's environment, for example: prod, pre-prod, and staging. Follows the [tag syntax requirements][5]. `applicationId` : Required
@@ -91,28 +91,28 @@ Enables collection of React Native crashes. : Optional
**Type**: String
**Default**: `US1`
-[The Datadog site parameter of your organization][9]. +[The Datadog site parameter of your organization][6]. `serviceName` : Optional
**Type**: String
-The service name for your application. Follows the [tag syntax requirements][15]. +The service name for your application. Follows the [tag syntax requirements][5]. `version` : Optional
**Type**: String
-The application's version. For example: 1.2.3, 6c44da20, and 2020.02.13. Follows the [tag syntax requirements][15]. +The application's version. For example: 1.2.3, 6c44da20, and 2020.02.13. Follows the [tag syntax requirements][5]. `versionSuffix` : Optional
**Type**: String
-Add a suffix to the reported version of the app. Accepted characters are alphanumerics and `_`, `-`, `:`, `.`, `/`. Other special characters are converted to underscores. A dash (`-`) is automatically added between the version and the suffix. Follows the [tag syntax requirements][15]. +Add a suffix to the reported version of the app. Accepted characters are alphanumerics and `_`, `-`, `:`, `.`, `/`. Other special characters are converted to underscores. A dash (`-`) is automatically added between the version and the suffix. Follows the [tag syntax requirements][5]. `trackFrustrations` : Optional
**Type**: Boolean
**Default**: `true`
-Enables [automatic collection of user frustrations][11]. Only error taps are supported. Implies `trackInteractions: true`. +Enables [automatic collection of user frustrations][7]. Only error taps are supported. Implies `trackInteractions: true`. `nativeCrashReportEnabled` : Optional
@@ -136,7 +136,7 @@ The percentage of sessions to track: `100` for all, `0` for none. Only tracked s : Optional
**Type**: Number
**Default**: `20`
-The percentage of requests to trace: `100` for all, `0` for none. For more information, see [Connect RUM and Traces][12]. +The percentage of requests to trace: `100` for all, `0` for none. For more information, see [Connect RUM and Traces][8]. `verbosity` : Optional
@@ -160,7 +160,7 @@ Enables native interaction tracking. Set to `true` if you want to track interact : Optional
**Type**: List
**Default**: `[]`
-List of your backends hosts to enable tracing with. For more information, see [Connect RUM and Traces][12]. +List of your backends hosts to enable tracing with. For more information, see [Connect RUM and Traces][8]. `telemetrySampleRate` : Optional
@@ -207,7 +207,7 @@ Enables tracking of RUM event when no RUM View is active. By default, background `proxyConfig` : Optional
**Type**: ProxyConfiguration
-Optional [proxy configuration][13]. +Optional [proxy configuration][9]. `useAccessibilityLabel` : Optional
@@ -340,6 +340,35 @@ DdSdkReactNative.setAttributes({ }); ``` +## Track view navigation + +Because React Native offers a wide range of libraries to create screen navigation, only manual view tracking is supported by default. To see RUM or Error tracking sessions populate in Datadog, you need to implement view tracking. + +You can manually start and stop a view using the following `startView()` and `stopView` methods. + +```js +import { + DdRum +} from '@datadog/mobile-react-native'; + +// Start a view with a unique view identifier, a custom view name, and an object to attach additional attributes to the view +DdRum.startView( + '', // has to be unique, for example it can be ViewName-unique-id + 'View Name', + { 'custom.foo': 'something' }, + Date.now() +); +// Stops a previously started view with the same unique view identifier, and an object to attach additional attributes to the view +DdRum.stopView('', { 'custom.bar': 42 }, Date.now()); +``` + +Use one of Datadog's integrations to automatically track views for the following libraries: + +- If you use the [`react-native-navigation`][10] library, then add the `@datadog/mobile-react-native-navigation` package and follow the [setup instructions][11]. +- If you use the [`react-navigation`][12] library, then add the `@datadog/mobile-react-navigation` package and follow the [setup instructions][11]. + +If you experience any issues setting up View tracking with `@datadog/mobile-react-navigation` you can see this Datadog [example application][13] as a reference. + ## Clear all data Use `clearAllData` to clear all data that has not been sent to Datadog. @@ -400,12 +429,12 @@ Events include additional context: | ------------- | ------------------------------------------------ | ----------------------------------------------------------------------- | | LogEvent | `logEvent.additionalInformation.userInfo` | Contains the global user info set by `DdSdkReactNative.setUser`. | | | `logEvent.additionalInformation.attributes` | Contains the global attributes set by `DdSdkReactNative.setAttributes`. | -| ActionEvent | `actionEvent.actionContext` | [GestureResponderEvent][5] corresponding to the action or `undefined`. | +| ActionEvent | `actionEvent.actionContext` | [GestureResponderEvent][14] corresponding to the action or `undefined`. | | | `actionEvent.additionalInformation.userInfo` | Contains the global user info set by `DdSdkReactNative.setUser`. | | | `actionEvent.additionalInformation.attributes` | Contains the global attributes set by `DdSdkReactNative.setAttributes`. | | ErrorEvent | `errorEvent.additionalInformation.userInfo` | Contains the global user info set by `DdSdkReactNative.setUser`. | | | `errorEvent.additionalInformation.attributes` | Contains the global attributes set by `DdSdkReactNative.setAttributes`. | -| ResourceEvent | `resourceEvent.resourceContext` | [XMLHttpRequest][6] corresponding to the resource or `undefined`. | +| ResourceEvent | `resourceEvent.resourceContext` | [XMLHttpRequest][15] corresponding to the resource or `undefined`. | | | `resourceEvent.additionalInformation.userInfo` | Contains the global user info set by `DdSdkReactNative.setUser`. | | | `resourceEvent.additionalInformation.attributes` | Contains the global attributes set by `DdSdkReactNative.setAttributes`. | @@ -456,13 +485,13 @@ export default function App() { } ``` -This uses React Native's [InteractionManager.runAfterInteractions][3] to delay the animations. 
+This uses React Native's [InteractionManager.runAfterInteractions][16] to delay the animations. All interactions with the RUM SDK (view tracking, actions, resources tracing, and so on) are still recorded and kept in a queue with a limit of 100 events. Logs are not recorded and calling a `DdLogs` method before the actual initialization might break logging. -If you experience any issue setting up the asynchronous initialization of Datadog, you can check out our [example application][7]. +If you experience any issue setting up the asynchronous initialization of Datadog, you can check out our [example application][17]. ## Delaying the initialization @@ -521,7 +550,7 @@ const configuration = { ## Monitoring hybrid React Native applications -See [Monitor hybrid React Native applications][16]. +See [Monitor hybrid React Native applications][18]. ## Further reading @@ -529,15 +558,19 @@ See [Monitor hybrid React Native applications][16]. [1]: https://app.datadoghq.com/rum/application/create [2]: /real_user_monitoring/mobile_and_tv_monitoring/react_native -[3]: https://reactnative.dev/docs/interactionmanager#runafterinteractions -[4]: https://jestjs.io/ -[5]: https://github.com/DefinitelyTyped/DefinitelyTyped/blob/master/types/react-native/v0.70/index.d.ts#L548 -[6]: https://developer.mozilla.org/en-US/docs/Web/API/XMLHttpRequest -[7]: https://github.com/DataDog/dd-sdk-reactnative-examples/tree/main/rum-react-navigation-async -[8]: /account_management/api-app-keys/#client-tokens -[9]: /getting_started/site/ -[11]: /real_user_monitoring/browser/frustration_signals/ -[12]: /real_user_monitoring/platform/connect_rum_and_traces?tab=reactnativerum -[13]: /real_user_monitoring/guide/proxy-mobile-rum-data/ -[15]: /getting_started/tagging/#define-tags -[16]: /real_user_monitoring/guide/monitor-hybrid-react-native-applications \ No newline at end of file +[3]: https://jestjs.io/ +[4]: /account_management/api-app-keys/#client-tokens +[5]: /getting_started/tagging/#define-tags +[6]: /getting_started/site/ +[7]: /real_user_monitoring/browser/frustration_signals/ +[8]: /real_user_monitoring/platform/connect_rum_and_traces?tab=reactnativerum +[9]: /real_user_monitoring/guide/proxy-mobile-rum-data/ +[10]: https://github.com/wix/react-native-navigation +[11]: /real_user_monitoring/mobile_and_tv_monitoring/react_native/integrated_libraries/ +[12]: https://github.com/rmobile_and_tv_monitoring/eact-navigation/react-navigation +[13]: https://github.com/DataDog/dd-sdk-reactnative-examples/tree/main/rum-react-navigation +[14]: https://github.com/DefinitelyTyped/DefinitelyTyped/blob/683ec4a2b420ff6bd3873a7338416ad3ec0b6595/types/react-native-side-menu/index.d.ts#L2 +[15]: https://developer.mozilla.org/en-US/docs/Web/API/XMLHttpRequest +[16]: https://reactnative.dev/docs/interactionmanager#runafterinteractions +[17]: https://github.com/DataDog/dd-sdk-reactnative-examples/tree/main/rum-react-navigation-async +[18]: /real_user_monitoring/guide/monitor-hybrid-react-native-applications diff --git a/content/en/real_user_monitoring/mobile_and_tv_monitoring/react_native/error_tracking.md b/content/en/real_user_monitoring/mobile_and_tv_monitoring/react_native/error_tracking.md index aaff472ef2c49..c545ab4e39c10 100644 --- a/content/en/real_user_monitoring/mobile_and_tv_monitoring/react_native/error_tracking.md +++ b/content/en/real_user_monitoring/mobile_and_tv_monitoring/react_native/error_tracking.md @@ -95,12 +95,7 @@ See the [RUM Debug Symbols][16] page to view all uploaded symbols. 
## Limitations -{{< site-region region="us,us3,us5,eu,gov" >}} -Source maps, mapping files, and dSYM files are limited to **500** MB each. -{{< /site-region >}} -{{< site-region region="ap1" >}} -Source maps, mapping files, and dSYM files are limited to **500** MB each. -{{< /site-region >}} +Source maps and mapping files are limited in size to **500 MB** each, while dSYM files can go up to **2 GB** each. To compute the size of your source maps and bundle, run the following command: diff --git a/content/en/real_user_monitoring/mobile_and_tv_monitoring/react_native/setup/expo.md b/content/en/real_user_monitoring/mobile_and_tv_monitoring/react_native/setup/expo.md index 9c8fed1751d0a..c17b6b6c7ca98 100644 --- a/content/en/real_user_monitoring/mobile_and_tv_monitoring/react_native/setup/expo.md +++ b/content/en/real_user_monitoring/mobile_and_tv_monitoring/react_native/setup/expo.md @@ -72,12 +72,12 @@ DdRum.startView( DdRum.stopView('', { 'custom.bar': 42 }, Date.now()); ``` -#### Automatic tracking +### Automatic tracking Automatic view tracking is supported for the following modules: -- React Navigation: [@Datadog/mobile-react-navigation][4] -- React Native Navigation: [@Datadog/mobile-react-native-navigation][5] +- React Navigation: [@Datadog/mobile-react-navigation][10] +- React Native Navigation: [@Datadog/mobile-react-native-navigation][11] In this Datadog example project, View Tracking is achieved through `@datadog/mobile-react-navigation` and is configured using the `NavigationContainer`: @@ -130,7 +130,7 @@ await DdSdkReactNative.initialize(config);
Configuring the session sample rate does not apply to Error Tracking.
-To control the data your application sends to Datadog RUM, you can specify a sampling rate for RUM sessions while [initializing the Expo SDK][6]. To set this rate, use the `config.sessionSamplingRate` parameter and specify a percentage between 0 and 100. +To control the data your application sends to Datadog RUM, you can specify a sampling rate for RUM sessions while [initializing the Expo SDK][4]. To set this rate, use the `config.sessionSamplingRate` parameter and specify a percentage between 0 and 100. ### Upload source maps on EAS builds @@ -162,11 +162,11 @@ yarn add -D @datadog/datadog-ci Run `eas secret:create` to set `DATADOG_API_KEY` to your Datadog API key, and `DATADOG_SITE` to the host of your Datadog site (for example, `datadoghq.com`). -For information about tracking Expo crashes, see [Expo Crash Reporting and Error Tracking][7]. +For information about tracking Expo crashes, see [Expo Crash Reporting and Error Tracking][5]. ## Tracking Expo Router screens -If you are using [Expo Router][8], track your screens in your `app/_layout.js` file: +If you are using [Expo Router][6], track your screens in your `app/_layout.js` file: ```javascript import { useEffect } from 'react'; @@ -192,10 +192,10 @@ If you are using Expo Go, switch to development builds (recommended), or keep us ### Switch from Expo Go to development builds -Your application's [development builds][9] are debug builds that contain the `expo-dev-client` package. +Your application's [development builds][7] are debug builds that contain the `expo-dev-client` package. -1. Enable the [custom native code to run][10] with `expo run:android` and `expo run:ios`. -2. To start using your development application, run `expo install expo-dev-client` and `expo start --dev-client`. This installs and starts the [`expo-dev-client` package][11] to execute the added native code in dev mode. +1. Enable the [custom native code to run][8] with `expo run:android` and `expo run:ios`. +2. To start using your development application, run `expo install expo-dev-client` and `expo start --dev-client`. This installs and starts the [`expo-dev-client` package][9] to execute the added native code in dev mode. 
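Put together, the two steps above amount to a short command sequence (using the same `expo` CLI commands referenced in this guide; prefix them with `npx` if the CLI is not installed globally):

```shell
# Step 1: build and run the app with custom native code enabled
expo run:android   # or: expo run:ios

# Step 2: install the dev client and start the bundler against it
expo install expo-dev-client
expo start --dev-client
```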
### Develop with Expo Go @@ -287,11 +287,11 @@ config.resourceEventMapper = event => { [1]: /real_user_monitoring/ [2]: /error_tracking/ [3]: https://github.com/DataDog/dd-sdk-reactnative-examples/tree/main/rum-expo-react-navigation -[4]: https://www.npmjs.com/package/@datadog/mobile-react-navigation -[5]: https://www.npmjs.com/package/@datadog/mobile-react-native-navigation -[6]: /real_user_monitoring/mobile_and_tv_monitoring/setup/expo#initialize-the-library-with-application-context -[7]: /real_user_monitoring/error_tracking/mobile/expo/ -[8]: https://expo.github.io/router/docs/ -[9]: https://docs.expo.dev/development/introduction/ -[10]: https://docs.expo.dev/workflow/customizing/#releasing-apps-with-custom-native-code-to -[11]: https://docs.expo.dev/development/getting-started/ +[4]: /real_user_monitoring/mobile_and_tv_monitoring/setup/expo#initialize-the-library-with-application-context +[5]: /real_user_monitoring/error_tracking/mobile/expo/ +[6]: https://expo.github.io/router/docs/ +[7]: https://docs.expo.dev/development/introduction/ +[8]: https://docs.expo.dev/workflow/customizing/#releasing-apps-with-custom-native-code-to +[9]: https://docs.expo.dev/development/getting-started/ +[10]: https://www.npmjs.com/package/@datadog/mobile-react-navigation +[11]: https://www.npmjs.com/package/@datadog/mobile-react-native-navigation \ No newline at end of file diff --git a/content/en/real_user_monitoring/mobile_and_tv_monitoring/react_native/setup/reactnative.md b/content/en/real_user_monitoring/mobile_and_tv_monitoring/react_native/setup/reactnative.md index f91fce13b2530..587f74fdeb159 100644 --- a/content/en/real_user_monitoring/mobile_and_tv_monitoring/react_native/setup/reactnative.md +++ b/content/en/real_user_monitoring/mobile_and_tv_monitoring/react_native/setup/reactnative.md @@ -89,8 +89,8 @@ The Datadog React Native SDK requires you to have `compileSdkVersion = 31` or hi {{< img src="real_user_monitoring/react_native/reactnative_setup.png" alt="Create a RUM application for React Native in Datadog" style="width:90%;">}} -[1]: https://app.datadoghq.com/rum/application/create +[1]: https://app.datadoghq.com/rum/application/create {{% /tab %}} {{% tab "Error Tracking" %}} @@ -101,8 +101,8 @@ The Datadog React Native SDK requires you to have `compileSdkVersion = 31` or hi {{< img src="real_user_monitoring/error_tracking/mobile-new-application.png" alt="Create an application for React Native in Datadog" style="width:90%;">}} -[1]: https://app.datadoghq.com/error-tracking/settings/setup/client/ +[1]: https://app.datadoghq.com/error-tracking/settings/setup/client/ {{% /tab %}} {{< /tabs >}} @@ -374,35 +374,6 @@ If user interactions tracking is enabled as in the code example above, the Datad Alternatively, you can use the `accessibilityLabel` element property to give the tap action a name; otherwise, the element type is reported. You can check the sample app for usage examples. -### Track view navigation - -Because React Native offers a wide range of libraries to create screen navigation, only manual view tracking is supported by default. To see RUM or Error tracking sessions populate in Datadog, you need to implement view tracking. - -You can manually start and stop a view using the following `startView()` and `stopView` methods. 
- -```js -import { - DdRum -} from '@datadog/mobile-react-native'; - -// Start a view with a unique view identifier, a custom view name, and an object to attach additional attributes to the view -DdRum.startView( - '', // has to be unique, for example it can be ViewName-unique-id - 'View Name', - { 'custom.foo': 'something' }, - Date.now() -); -// Stops a previously started view with the same unique view identifier, and an object to attach additional attributes to the view -DdRum.stopView('', { 'custom.bar': 42 }, Date.now()); -``` - -Use one of Datadog's integrations to automatically track views for the following libraries: - -- If you use the [`react-native-navigation`][11] library, then add the `@datadog/mobile-react-native-navigation` package and follow the [setup instructions][12]. -- If you use the [`react-navigation`][13] library, then add the `@datadog/mobile-react-navigation` package and follow the [setup instructions][12]. - -If you experience any issues setting up View tracking with `@datadog/mobile-react-navigation` you can see this Datadog [example application][14] as a reference. - ## Sending data when device is offline The React Native SDK ensures availability of data when your user device is offline. In cases of low-network areas, or when the device battery is too low, all events are first stored on the local device in batches. They are sent as soon as the network is available, and the battery is high enough to ensure the React Native SDK does not impact the end user's experience. If the network is not available with your application running in the foreground, or if an upload of data fails, the batch is kept until it can be sent successfully. @@ -428,11 +399,11 @@ configuration.trackBackgroundEvents = true; ### Android -Before data is uploaded to Datadog, it is stored in cleartext in your application's cache directory. This cache folder is protected by [Android's Application Sandbox][15], meaning that on most devices this data can't be read by other applications. However, if the mobile device is rooted, or someone tampers with the Linux kernel, the stored data might become readable. +Before data is uploaded to Datadog, it is stored in cleartext in your application's cache directory. This cache folder is protected by [Android's Application Sandbox][11], meaning that on most devices this data can't be read by other applications. However, if the mobile device is rooted, or someone tampers with the Linux kernel, the stored data might become readable. ### iOS -Before data is uploaded to Datadog, it is stored in cleartext in the cache directory (`Library/Caches`) of your [application sandbox][16], which can't be read by any other app installed on the device. +Before data is uploaded to Datadog, it is stored in cleartext in the cache directory (`Library/Caches`) of your [application sandbox][12], which can't be read by any other app installed on the device. ## Development mode @@ -454,7 +425,7 @@ const config = new DatadogProviderConfiguration( ## New architecture support -The [React Native new architecture][15] is supported by the React Native SDK in version `>=1.8.0`. +The [React Native new architecture][11] is supported by the React Native SDK in version `>=1.8.0`. The minimum supported React Native version for the new architecture is `0.71`. @@ -488,7 +459,7 @@ pre_install do |installer| end ``` -**Note**: This solution comes from this [StackOverflow][18] post. +**Note**: This solution comes from this [StackOverflow][13] post. 
## Further Reading @@ -504,11 +475,6 @@ end [8]: /account_management/api-app-keys/#client-tokens [9]: /real_user_monitoring/mobile_and_tv_monitoring/react_native/setup/reactnative/#initialize-the-library-with-application-context [10]: /getting_started/tagging/#define-tags -[11]: https://github.com/wix/react-native-navigation -[12]: /real_user_monitoring/mobile_and_tv_monitoring/react_native/integrated_libraries/ -[13]: https://github.com/rmobile_and_tv_monitoring/eact-navigation/react-navigation -[14]: https://github.com/DataDog/dd-sdk-reactnative-examples/tree/main/rum-react-navigation -[15]: https://source.android.com/security/app-sandbox -[16]: https://support.apple.com/guide/security/security-of-runtime-process-sec15bfe098e/web -[17]: https://reactnative.dev/docs/the-new-architecture/landing-page -[18]: https://stackoverflow.com/questions/37388126/use-frameworks-for-only-some-pods-or-swift-pods/60914505#60914505 +[11]: https://source.android.com/security/app-sandbox +[12]: https://support.apple.com/guide/security/security-of-runtime-process-sec15bfe098e/web +[13]: https://stackoverflow.com/questions/37388126/use-frameworks-for-only-some-pods-or-swift-pods/60914505#60914505 diff --git a/content/en/real_user_monitoring/mobile_and_tv_monitoring/roku/advanced_configuration.md b/content/en/real_user_monitoring/mobile_and_tv_monitoring/roku/advanced_configuration.md index 0539659b1705a..13b03f9c9d662 100644 --- a/content/en/real_user_monitoring/mobile_and_tv_monitoring/roku/advanced_configuration.md +++ b/content/en/real_user_monitoring/mobile_and_tv_monitoring/roku/advanced_configuration.md @@ -131,10 +131,11 @@ In addition to the default attributes captured by the SDK automatically, you can m.global.setField("datadogContext", { foo: "Some value", bar: 123}) ``` +## Further Reading + +{{< partial name="whats-next/whats-next.html" >}} + [1]: https://app.datadoghq.com/rum/application/create [2]: /real_user_monitoring/mobile_and_tv_monitoring/roku/setup -## Further Reading - -{{< partial name="whats-next/whats-next.html" >}} diff --git a/content/en/real_user_monitoring/mobile_and_tv_monitoring/roku/setup.md b/content/en/real_user_monitoring/mobile_and_tv_monitoring/roku/setup.md index a54f3e998b8b2..a8e3d50c65fdf 100644 --- a/content/en/real_user_monitoring/mobile_and_tv_monitoring/roku/setup.md +++ b/content/en/real_user_monitoring/mobile_and_tv_monitoring/roku/setup.md @@ -195,13 +195,14 @@ end sub To control the data your application sends to Datadog RUM, you can specify a sampling rate for RUM sessions while [initializing the RUM Roku SDK][8]. The rate is a percentage between 0 and 100. By default, `sessionSamplingRate` is set to 100 (keep all sessions). -### Step 4 - Instrument the channel -See [**Track RUM Resources**][9] to enable automatic tracking of all your resources, and [**Enrich user sessions**][10] to add custom global or user information to your events. +## Instrument the channel -#### Track Views +See [**Track RUM Resources**](#track-rum-resources) to enable automatic tracking of all your resources, and [**Enrich user sessions**](#enrich-user-sessions) to add custom global or user information to your events. -To split [user sessions][11] into logical steps, manually start a View using the following code. Every navigation to a new screen within your channel should correspond to a new View. +### Track Views + +To split [user sessions][9] into logical steps, manually start a View using the following code. 
Every navigation to a new screen within your channel should correspond to a new View. ```brightscript viewName = "VideoDetails" @@ -209,7 +210,7 @@ To split [user sessions][11] into logical steps, manually start a View using the m.global.datadogRumAgent.callfunc("startView", viewName, viewUrl) ``` -#### Track RUM Actions +### Track RUM Actions RUM Actions represent the interactions your users have with your channel. You can forward actions to Datadog as follows: @@ -219,7 +220,7 @@ RUM Actions represent the interactions your users have with your channel. You ca m.global.datadogRumAgent.callfunc("addAction", { target: targetName, type: actionType}) ``` -#### Track RUM errors +### Track RUM errors Whenever you perform an operation that might throw an exception, you can forward the error to Datadog as follows: @@ -251,6 +252,4 @@ This means that even if users open your application while offline, no data is lo [6]: /account_management/api-app-keys/#client-tokens [7]: /getting_started/tagging/using_tags/#rum--session-replay [8]: /real_user_monitoring/mobile_and_tv_monitoring/roku/advanced_configuration/#enrich-user-sessions -[9]: /real_user_monitoring/mobile_and_tv_monitoring/roku/advanced_configuration/#track-rum-resources -[10]: /real_user_monitoring/mobile_and_tv_monitoring/roku/advanced_configuration/#enrich-user-sessions -[11]: /real_user_monitoring/mobile_and_tv_monitoring/roku/data_collected +[9]: /real_user_monitoring/mobile_and_tv_monitoring/roku/data_collected \ No newline at end of file diff --git a/content/en/real_user_monitoring/mobile_and_tv_monitoring/unity/_index.md b/content/en/real_user_monitoring/mobile_and_tv_monitoring/unity/_index.md index e53940ee452e7..769ab0fca123b 100644 --- a/content/en/real_user_monitoring/mobile_and_tv_monitoring/unity/_index.md +++ b/content/en/real_user_monitoring/mobile_and_tv_monitoring/unity/_index.md @@ -28,6 +28,5 @@ To get started with RUM for Unity, create an application and configure the Unity {{< nextlink href="/real_user_monitoring/mobile_and_tv_monitoring/unity/advanced_configuration">}}Advanced Configuration:Enrich user sessions, manage events and data, track custom global attributes and widgets, review initialization parameters, modify or drop RUM events, and more.{{< /nextlink >}} {{< nextlink href="/real_user_monitoring/mobile_and_tv_monitoring/unity/data_collected">}}Data Collected:Review data that the Unity SDK collects.{{< /nextlink >}} {{< nextlink href="/real_user_monitoring/mobile_and_tv_monitoring/unity/mobile_vitals">}}Mobile Vitals: View mobile vitals, which help compute insights about your mobile application.{{< /nextlink >}} - {{< nextlink href="/real_user_monitoring/mobile_and_tv_monitoring/unity/web_view_tracking">}}Web View Tracking: Monitor web views and eliminate blind spots in your mobile applications.{{< /nextlink >}} {{< nextlink href="/real_user_monitoring/mobile_and_tv_monitoring/unity/troubleshooting">}}Troubleshooting: Common troubleshooting Unity SDK issues.{{< /nextlink >}} {{< /whatsnext >}} diff --git a/content/en/real_user_monitoring/mobile_and_tv_monitoring/unity/error_tracking.md b/content/en/real_user_monitoring/mobile_and_tv_monitoring/unity/error_tracking.md index 297b19679c281..e7230e971a62e 100644 --- a/content/en/real_user_monitoring/mobile_and_tv_monitoring/unity/error_tracking.md +++ b/content/en/real_user_monitoring/mobile_and_tv_monitoring/unity/error_tracking.md @@ -81,12 +81,7 @@ See the [RUM Debug Symbols][6] page to view all uploaded symbols. 
## Limitations -{{< site-region region="us,us3,us5,eu,gov" >}} -Source maps and dSYM files are limited to **500** MB each. -{{< /site-region >}} -{{< site-region region="ap1" >}} -Source maps and dSYM files are limited to **500** MB each. -{{< /site-region >}} +Mapping files are limited in size to **500 MB** each, while dSYM files can go up to **2 GB** each. ## Test your implementation diff --git a/content/en/real_user_monitoring/mobile_and_tv_monitoring/unity/web_view_tracking.md b/content/en/real_user_monitoring/mobile_and_tv_monitoring/unity/web_view_tracking.md deleted file mode 100644 index 64bcbc9eb058f..0000000000000 --- a/content/en/real_user_monitoring/mobile_and_tv_monitoring/unity/web_view_tracking.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: Unity Web View Tracking ---- - -{{< include-markdown "real_user_monitoring/mobile_and_tv_monitoring/web_view_tracking" >}} \ No newline at end of file diff --git a/content/en/real_user_monitoring/mobile_and_tv_monitoring/web_view_tracking/_index.md b/content/en/real_user_monitoring/mobile_and_tv_monitoring/web_view_tracking/_index.md index 8cbe73e1d1510..7fc390cfa1090 100644 --- a/content/en/real_user_monitoring/mobile_and_tv_monitoring/web_view_tracking/_index.md +++ b/content/en/real_user_monitoring/mobile_and_tv_monitoring/web_view_tracking/_index.md @@ -6,6 +6,7 @@ aliases: - /real_user_monitoring/flutter/web_view_tracking - /real_user_monitoring/reactnative/web_view_tracking - /real_user_monitoring/kotlin-multiplatform/web_view_tracking + - /real_user_monitoring/mobile_and_tv_monitoring/unity/web_view_tracking further_reading: - link: https://github.com/DataDog/dd-sdk-android tag: "Source Code" diff --git a/content/en/real_user_monitoring/platform/connect_rum_and_traces.md b/content/en/real_user_monitoring/platform/connect_rum_and_traces.md index b18d2ef182562..235972654089e 100644 --- a/content/en/real_user_monitoring/platform/connect_rum_and_traces.md +++ b/content/en/real_user_monitoring/platform/connect_rum_and_traces.md @@ -418,7 +418,7 @@ The default injection style is `tracecontext`, `Datadog`. `propagatorTypes` accepts a list of strings for desired propagators: - `datadog`: Datadog's propagator (`x-datadog-*`) - - `tracecontext`: [W3C Trace Context](https://www.w3.org/TR/trace-context/) (`traceparent`) + - `tracecontext`: [W3C Trace Context](https://www.w3.org/TR/trace-context/) (`traceparent`, `tracestate`) - `b3`: [B3 single header](https://github.com/openzipkin/b3-propagation#single-header) (`b3`) - `b3multi`: [B3 multiple headers](https://github.com/openzipkin/b3-propagation#multiple-headers) (`X-B3-*`) @@ -540,8 +540,8 @@ Datadog uses the distributed tracing protocol and sets up the HTTP headers below `x-datadog-origin: rum` : To make sure the generated traces from Real User Monitoring don't affect your APM Index Spans counts. -`x-datadog-sampling-priority: 1` -: To make sure that the Agent keeps the trace. +`x-datadog-sampling-priority` +: Set to `1` by the Real User Monitoring SDK if the trace was sampled, or `0` if it was not. {{% /tab %}} {{% tab "W3C Trace Context" %}} @@ -551,8 +551,15 @@ Datadog uses the distributed tracing protocol and sets up the HTTP headers below : `parent id`: 64 bits span ID, hexadecimal on 16 characters. : `trace flags`: Sampled (`01`) or not sampled (`00`) +`tracestate: dd=s:[sampling priority];o:[origin]` +: `dd`: Datadog's vendor prefix. +: `sampling priority`: Set to `1` if the trace was sampled, or `0` if it was not. 
+: `origin`: Always set to `rum` to make sure the generated traces from Real User Monitoring don't affect your APM Index Spans counts. + Example: : `traceparent: 00-00000000000000008448eb211c80319c-b7ad6b7169203331s-01` +: `tracestate: dd=s:1;o:rum` + {{% /tab %}} {{% tab "b3 / b3 Multiple Headers" %}} `b3: [trace id]-[span id]-[sampled]` diff --git a/content/en/security/application_security/software_composition_analysis/setup/compatibility/go.md b/content/en/security/application_security/software_composition_analysis/setup/compatibility/go.md index 09e149f52a267..045955dbf42a0 100644 --- a/content/en/security/application_security/software_composition_analysis/setup/compatibility/go.md +++ b/content/en/security/application_security/software_composition_analysis/setup/compatibility/go.md @@ -50,6 +50,9 @@ The Go tracer includes support for the following frameworks, data stores and lib The Go packages listed in this page are relevant for Application Security capabilities. You can also find more tracing integrations in [APM's tracing compatibility page][16]. +{{< tabs >}} +{{% tab "v1" %}} + **Note**: The [Go integrations documentation][6] provides a detailed overview of the supported packages and their APIs, along with usage examples.
If you don't see your library of choice listed, fill out this form to send details.
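As a reference point, the following is a minimal sketch of instrumenting one of the listed packages with the v1 tracer, here `net/http` through the `httptrace` contrib package; it assumes Application Security is switched on separately (for example, with the `DD_APPSEC_ENABLED=true` environment variable), so only the tracing wiring is shown.

```go
package main

import (
	"net/http"

	httptrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http"
	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
)

func main() {
	// Start the tracer; with Application Security enabled, the instrumented
	// handlers below are monitored for threats as well as traced.
	tracer.Start()
	defer tracer.Stop()

	// NewServeMux returns a drop-in replacement for http.ServeMux that
	// traces every matched route.
	mux := httptrace.NewServeMux()
	mux.HandleFunc("/hello", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello"))
	})

	http.ListenAndServe(":8080", mux)
}
```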
@@ -58,35 +61,30 @@ The Go packages listed in this page are relevant for Application Security capabi | Framework | Threat Detection supported? | Threat Protection supported? | |-------------------|-----------------------------|------------------------------| -| [net/http][13] | {{< X >}} | {{< X >}} | -| [Gin][7] | {{< X >}} | {{< X >}} | -| [Gorilla Mux][8] | {{< X >}} | {{< X >}} | -| [gRPC][11] | {{< X >}} | {{< X >}} | -| [echo v4][9] | {{< X >}} | {{< X >}} | -| [echo v3][10] | {{< X >}} | {{< X >}} | -| [chi][12] | {{< X >}} | {{< X >}} | -| [graphql-go][17] | {{< X >}} | {{< X >}} | -| [gqlgen][18] | {{< X >}} | {{< X >}} | +| [net/http][13] | | | +| [Gin][7] | | | +| [Gorilla Mux][8] | | | +| [gRPC][11] | | | +| [echo v4][9] | | | +| [echo v3][10] | | | +| [chi][12] | | | +| [graphql-go][17] | | | +| [gqlgen][18] | | | ### Networking framework compatibility | Framework | Threat Detection supported? | Threat Protection supported? | |-----------------------|-----------------------------|------------------------------| -| [gRPC client][11] | {{< X >}} | {{< X >}} | -| [net/http client][13] | {{< X >}} | {{< X >}} | +| [gRPC client][11] | | | +| [net/http client][13] | | | ### Data store compatibility | Framework | Threat Detection supported? | Threat Protection supported? | |-------------------|-----------------|--------------------------------------------------------------------------| -| [sql][14] | {{< X >}} | {{< X >}} | +| [sql][14] | | | -[1]: /agent/remote_config/#enabling-remote-configuration -[2]: https://github.com/DataDog/dd-trace-go -[3]: https://github.com/DataDog/dd-trace-go#support-policy -[4]: https://github.com/DataDog/dd-trace-go#support-maintenance -[5]: https://www.datadoghq.com/support/ [6]: https://pkg.go.dev/gopkg.in/DataDog/dd-trace-go.v1/contrib [7]: https://pkg.go.dev/gopkg.in/DataDog/dd-trace-go.v1/contrib/gin-gonic/gin [8]: https://pkg.go.dev/gopkg.in/DataDog/dd-trace-go.v1/contrib/gorilla/mux @@ -97,7 +95,62 @@ The Go packages listed in this page are relevant for Application Security capabi [13]: https://pkg.go.dev/gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http [14]: https://pkg.go.dev/gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql [15]: https://github.com/golang/go/wiki/cgo -[16]: /tracing/compatibility_requirements/go [17]: https://pkg.go.dev/github.com/graphql-go/graphql [18]: https://pkg.go.dev/github.com/99designs/gqlgen/graphql +{{% /tab %}} + +{{% tab "v2" %}} + +**Note**: The [Go integrations documentation][19] provides a detailed overview of the supported packages and their APIs, along with usage examples. + +
If you don't see your library of choice listed, fill out this form to send details.
+ +### Web framework compatibility + +| Framework | Threat Detection supported? | Threat Protection supported? | +|-------------------|-----------------------------|------------------------------| +| [net/http][26] | | | +| [Gin][20] | | | +| [Gorilla Mux][21] | | | +| [gRPC][24] | | | +| [echo v4][22] | | | +| [chi][25] | | | +| [graphql-go][17] | | | +| [gqlgen][18] | | | + + +### Networking framework compatibility + +| Framework | Threat Detection supported? | Threat Protection supported? | +|-----------------------|-----------------------------|------------------------------| +| [gRPC client][24] | | | +| [net/http client][26] | | | + +### Data store compatibility + +| Framework | Threat Detection supported? | Threat Protection supported? | +|-------------------|-----------------|--------------------------------------------------------------------------| +| [sql][27] | | | + +[17]: https://pkg.go.dev/github.com/graphql-go/graphql +[18]: https://pkg.go.dev/github.com/99designs/gqlgen/graphql +[19]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/v2/contrib/ +[20]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/gin-gonic/gin/v2 +[21]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/gorilla/mux/v2 +[22]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/labstack/echo.v4/v2 +[23]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/labstack/echo/v2 +[24]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/google.golang.org/grpc/v2 +[25]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/go-chi/chi/v2 +[26]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/net/http/v2 +[27]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/database/sql/v2 + +{{% /tab %}} +{{< /tabs >}} + +[1]: /agent/remote_config/#enabling-remote-configuration +[2]: https://github.com/DataDog/dd-trace-go +[3]: https://github.com/DataDog/dd-trace-go#support-policy +[4]: https://github.com/DataDog/dd-trace-go#support-maintenance +[5]: https://www.datadoghq.com/support/ +[16]: /tracing/compatibility_requirements/go \ No newline at end of file diff --git a/content/en/security/application_security/threats/add-user-info.md b/content/en/security/application_security/threats/add-user-info.md index f8e0cd1e94ca3..852e1264bbec0 100644 --- a/content/en/security/application_security/threats/add-user-info.md +++ b/content/en/security/application_security/threats/add-user-info.md @@ -116,12 +116,16 @@ For information and options, read [the .NET tracer documentation][1]. {{< programming-lang lang="go" >}} -The Go tracer package provides the `SetUser()` function, which allows you to monitor authenticated requests by adding user information to the trace. For more options, see [the Go tracer documentation][1]. +The Go tracer package provides the `SetUser()` function, which allows you to monitor authenticated requests by adding user information to the trace. For more options, see [the Go tracer documentation][1] (or [v2 documentation][2]). This example shows how to retrieve the current tracer span, use it to set user monitoring tags, and enable user blocking capability: ```go -import "gopkg.in/DataDog/dd-trace-go.v1/appsec" +import ( + "gopkg.in/DataDog/dd-trace-go.v1/appsec" // 1.x + // "github.com/DataDog/dd-trace-go/v2/appsec // 2.x +) + func handler(w http.ResponseWriter, r *http.Request) { if appsec.SetUser(r.Context(), "my-uid") != nil { // The user must be blocked by aborting the request handler asap. 
@@ -132,6 +136,7 @@ func handler(w http.ResponseWriter, r *http.Request) { ``` [1]: https://pkg.go.dev/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer#SetUser +[2]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer#SetUser {{< /programming-lang >}} {{< programming-lang lang="ruby" >}} @@ -475,7 +480,10 @@ The following examples show how to track login events or custom events (using si {{< tabs >}} {{% tab "Login success" %}} ```go -import "gopkg.in/DataDog/dd-trace-go.v1/appsec" +import ( + "gopkg.in/DataDog/dd-trace-go.v1/appsec" // 1.x + // "github.com/DataDog/dd-trace-go/v2/appsec" // 2.x +) func handler(w http.ResponseWriter, r *http.Request) { metadata := make(map[string]string) /* optional extra event metadata */ @@ -494,7 +502,10 @@ func handler(w http.ResponseWriter, r *http.Request) { {{% /tab %}} {{% tab "Login failure" %}} ```go -import "gopkg.in/DataDog/dd-trace-go.v1/appsec" +import ( + "gopkg.in/DataDog/dd-trace-go.v1/appsec" // 1.x + // "github.com/DataDog/dd-trace-go/v2/appsec" // 2.x +) func handler(w http.ResponseWriter, r *http.Request) { exists := /* whether the given user id exists or not */ @@ -509,7 +520,10 @@ func handler(w http.ResponseWriter, r *http.Request) { {{% tab "Custom business logic" %}} ```go -import "gopkg.in/DataDog/dd-trace-go.v1/appsec" +import ( + "gopkg.in/DataDog/dd-trace-go.v1/appsec" // 1.x + // "github.com/DataDog/dd-trace-go/v2/appsec" // 2.x +) func handler(w http.ResponseWriter, r *http.Request) { metadata := map[string]string{"usr.id": "my-uid"} diff --git a/content/en/security/application_security/threats/exploit-prevention.md b/content/en/security/application_security/threats/exploit-prevention.md index 96a90b36f722d..2c34685892d6b 100644 --- a/content/en/security/application_security/threats/exploit-prevention.md +++ b/content/en/security/application_security/threats/exploit-prevention.md @@ -63,7 +63,7 @@ ASM Exploit Prevention intercepts all SQL queries to determine if a user paramet | Server-side Request Forgery (SSRF) | v3.3.0 | v2.15.0 | v1.70.1 | v1.42.0 | v5.20.0, v4.44.0 | Avail. in Q1 '25 | Avail. in Q1 '25 | | Local File Inclusion (LFI) | v3.5.0 | v2.15.0 | [orchestrion][3] v1.0.0 | v1.42.0 | v5.24.0, v4.48.0 | Avail. in Q1 '25 | Avail. in Q1 '25 | | SQL Injection (SQLi) | v3.4.0 | v2.16.0 | v1.70.1 | v1.42.0 | v5.25.0, v4.49.0 | Avail. in Q1 '25 | Avail. in Q4 '24 | -| Command Injection | v3.4.0 | v2.15.0 | Avail. in Q4 '24 | Avail. in Q1 '25 | v5.25.0, v4.49.0 | Avail. in Q1 '25 | Avail. in Q1 '25 | +| Command Injection | v3.4.0 | v2.15.0 | Avail. in Q4 '24 | v1.45.0 | v5.25.0, v4.49.0 | Avail. in Q1 '25 | Avail. in Q1 '25 | ## Enabling Exploit Prevention diff --git a/content/en/security/application_security/threats/setup/compatibility/go.md b/content/en/security/application_security/threats/setup/compatibility/go.md index 09e149f52a267..ed0844a044a49 100644 --- a/content/en/security/application_security/threats/setup/compatibility/go.md +++ b/content/en/security/application_security/threats/setup/compatibility/go.md @@ -50,6 +50,9 @@ The Go tracer includes support for the following frameworks, data stores and lib The Go packages listed in this page are relevant for Application Security capabilities. You can also find more tracing integrations in [APM's tracing compatibility page][16]. +{{% tabs %}} +{{% tab "v1" %}} + **Note**: The [Go integrations documentation][6] provides a detailed overview of the supported packages and their APIs, along with usage examples.
If you don't see your library of choice listed, fill out this form to send details.
@@ -58,35 +61,30 @@ The Go packages listed in this page are relevant for Application Security capabi | Framework | Threat Detection supported? | Threat Protection supported? | |-------------------|-----------------------------|------------------------------| -| [net/http][13] | {{< X >}} | {{< X >}} | -| [Gin][7] | {{< X >}} | {{< X >}} | -| [Gorilla Mux][8] | {{< X >}} | {{< X >}} | -| [gRPC][11] | {{< X >}} | {{< X >}} | -| [echo v4][9] | {{< X >}} | {{< X >}} | -| [echo v3][10] | {{< X >}} | {{< X >}} | -| [chi][12] | {{< X >}} | {{< X >}} | -| [graphql-go][17] | {{< X >}} | {{< X >}} | -| [gqlgen][18] | {{< X >}} | {{< X >}} | +| [net/http][13] | | | +| [Gin][7] | | | +| [Gorilla Mux][8] | | | +| [gRPC][11] | | | +| [echo v4][9] | | | +| [echo v3][10] | | | +| [chi][12] | | | +| [graphql-go][17] | | | +| [gqlgen][18] | | | ### Networking framework compatibility | Framework | Threat Detection supported? | Threat Protection supported? | |-----------------------|-----------------------------|------------------------------| -| [gRPC client][11] | {{< X >}} | {{< X >}} | -| [net/http client][13] | {{< X >}} | {{< X >}} | +| [gRPC client][11] | | | +| [net/http client][13] | | | ### Data store compatibility | Framework | Threat Detection supported? | Threat Protection supported? | |-------------------|-----------------|--------------------------------------------------------------------------| -| [sql][14] | {{< X >}} | {{< X >}} | +| [sql][14] | | | -[1]: /agent/remote_config/#enabling-remote-configuration -[2]: https://github.com/DataDog/dd-trace-go -[3]: https://github.com/DataDog/dd-trace-go#support-policy -[4]: https://github.com/DataDog/dd-trace-go#support-maintenance -[5]: https://www.datadoghq.com/support/ [6]: https://pkg.go.dev/gopkg.in/DataDog/dd-trace-go.v1/contrib [7]: https://pkg.go.dev/gopkg.in/DataDog/dd-trace-go.v1/contrib/gin-gonic/gin [8]: https://pkg.go.dev/gopkg.in/DataDog/dd-trace-go.v1/contrib/gorilla/mux @@ -96,8 +94,61 @@ The Go packages listed in this page are relevant for Application Security capabi [12]: https://pkg.go.dev/gopkg.in/DataDog/dd-trace-go.v1/contrib/go-chi/chi [13]: https://pkg.go.dev/gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http [14]: https://pkg.go.dev/gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql -[15]: https://github.com/golang/go/wiki/cgo [16]: /tracing/compatibility_requirements/go [17]: https://pkg.go.dev/github.com/graphql-go/graphql [18]: https://pkg.go.dev/github.com/99designs/gqlgen/graphql +{{% /tab %}} +{{% tab "v2" %}} + +**Note**: The [Go integrations documentation][19] provides a detailed overview of the supported packages and their APIs, along with usage examples. + +
If you don't see your library of choice listed, fill out this form to send details.
+ +### Web framework compatibility + +| Framework | Threat Detection supported? | Threat Protection supported? | +|-------------------|-----------------------------|------------------------------| +| [net/http][25] | | | +| [Gin][20] | | | +| [Gorilla Mux][21] | | | +| [gRPC][23] | | | +| [echo v4][22] | | | +| [chi][24] | | | +| [graphql-go][17] | | | +| [gqlgen][18] | | | + + +### Networking framework compatibility + +| Framework | Threat Detection supported? | Threat Protection supported? | +|-----------------------|-----------------------------|------------------------------| +| [gRPC client][23] | | | +| [net/http client][25] | | | + +### Data store compatibility + +| Framework | Threat Detection supported? | Threat Protection supported? | +|-------------------|-----------------|--------------------------------------------------------------------------| +| [sql][26] | | | + +[17]: https://pkg.go.dev/github.com/graphql-go/graphql +[18]: https://pkg.go.dev/github.com/99designs/gqlgen/graphql +[19]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/v2/contrib +[20]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/gin-gonic/gin/v2 +[21]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/gorilla/mux/v2 +[22]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/labstack/echo.v4/v2 +[23]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/google.golang.org/grpc/v2 +[24]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/go-chi/chi/v2 +[25]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/net/http/v2 +[26]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/database/sql/v2 + +{{% /tab %}} +{{% /tabs %}} + +[1]: /agent/remote_config/#enabling-remote-configuration +[2]: https://github.com/DataDog/dd-trace-go +[3]: https://github.com/DataDog/dd-trace-go#support-policy +[4]: https://github.com/DataDog/dd-trace-go#support-maintenance +[5]: https://www.datadoghq.com/support/ +[15]: https://github.com/golang/go/wiki/cgo \ No newline at end of file diff --git a/content/en/security/application_security/threats/setup/compatibility/java.md b/content/en/security/application_security/threats/setup/compatibility/java.md index 3cdef26c3a17d..e5a601c159532 100644 --- a/content/en/security/application_security/threats/setup/compatibility/java.md +++ b/content/en/security/application_security/threats/setup/compatibility/java.md @@ -68,6 +68,7 @@ Datadog does not officially support any early-access versions of Java. | ----------------------- | ---------- | --------------- | ---------------------------------------------- | ---------------------------------------------- | | Grizzly | 2.0+ | {{< X >}} | {{< X >}} | {{< X >}} | | Glassfish | | {{< X >}} | {{< X >}} | {{< X >}} | +| gRPC | 1.5+ | {{< X >}} | {{< tooltip text="N/A" tooltip="Blocking not yet available for gRPC" >}} | {{< X >}} | | Java Servlet | 2.3+, 3.0+ | {{< X >}} | {{< X >}} | {{< X >}} | | Jetty | 7.0-9.x, 10.x | {{< X >}} | {{< X >}} | {{< X >}} | | Spring Boot | 1.5 | {{< X >}} | {{< X >}} | {{< X >}} | diff --git a/content/en/security/application_security/threats/setup/compatibility/nodejs.md b/content/en/security/application_security/threats/setup/compatibility/nodejs.md index 1e5397f8397a5..bf770982a8004 100644 --- a/content/en/security/application_security/threats/setup/compatibility/nodejs.md +++ b/content/en/security/application_security/threats/setup/compatibility/nodejs.md @@ -75,7 +75,7 @@ The following operating systems are officially supported by `dd-trace`. 
Any oper ##### Application Security Capability Notes - **Software Composition Analysis** is supported on all frameworks - If your framework is not listed below, **Code Security** will still detect Weak Cipher, Weak Hashing, Insecure Cookie, Cookie without HttpOnly Flag, and Cookie without SameSite Flag vulnerabilities. - +- Although Threat Protection is available for express >= 4 versions, the blocking of payloads on the body is only supported for applications using `body-parser` library. | Framework | Versions | Threat Detection supported? | Threat Protection supported? | Code Security? | |-----------|----------|-----------------------------|------------------------------|----------------------------------------------------| diff --git a/content/en/security/application_security/threats/setup/threat_detection/go.md b/content/en/security/application_security/threats/setup/threat_detection/go.md index da28586be2d09..58f6db417a13a 100644 --- a/content/en/security/application_security/threats/setup/threat_detection/go.md +++ b/content/en/security/application_security/threats/setup/threat_detection/go.md @@ -32,8 +32,9 @@ You can monitor application security for Go apps running in Docker, Kubernetes, 1. **Add to your program's go.mod dependencies** the latest version of the Datadog Go library (version 1.53.0 or later): - ```console - $ go get -v -u gopkg.in/DataDog/dd-trace-go.v1 + ```shell + $ go get -v -u gopkg.in/DataDog/dd-trace-go.v1 # v1 + # $ go get -v -u github.com/DataDog/dd-trace-go/v2/ddtrace/tracer # v2 ``` 2. Datadog has a series of pluggable packages which provide out-of-the-box support for instrumenting a series of Go libraries and frameworks. diff --git a/content/en/security/application_security/troubleshooting.md b/content/en/security/application_security/troubleshooting.md index 39427c258f6f2..9138b7685b10a 100644 --- a/content/en/security/application_security/troubleshooting.md +++ b/content/en/security/application_security/troubleshooting.md @@ -191,13 +191,13 @@ There are no required integrations for PHP. The following Go frameworks should be instrumented using the out-of-the-box APM integrations: -- [gRPC][2] -- [net/http][3] -- [Gorilla Mux][4] -- [Echo][5] -- [Chi][6] +- [gRPC][2] ([v2][8]) +- [net/http][3] ([v2][9]) +- [Gorilla Mux][4] ([v2][10]) +- [Echo][5] ([v2][11]) +- [Chi][6] ([v2][12]) -If your framework is not supported, [create a new issue][7] in the Go repository. +Please be sure to reference the docs appropriate for your version (v1.x or v2.x) of the Go Tracer. If your framework is not supported, [create a new issue][7] in the Go repository. 
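As an illustration of the out-of-the-box integrations listed above, here is a minimal sketch of a gRPC server instrumented with the v1 contrib interceptors; swap in the v2 module paths referenced below if you are on the v2 tracer. It assumes Application Security is enabled separately, for example with `DD_APPSEC_ENABLED=true`.

```go
// Minimal sketch using the v1 import paths.
package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	grpctrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc"
	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
)

func main() {
	tracer.Start()
	defer tracer.Stop()

	// The interceptors from the contrib package create the spans that Application Security inspects.
	server := grpc.NewServer(
		grpc.UnaryInterceptor(grpctrace.UnaryServerInterceptor()),
		grpc.StreamInterceptor(grpctrace.StreamServerInterceptor()),
	)
	// Register your gRPC services on `server` here, then serve as usual.

	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(server.Serve(lis))
}
```

The same pattern applies to the other frameworks in the list: start the tracer, then route requests through the framework's contrib wrapper or interceptor.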
[2]: https://pkg.go.dev/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc#example-package-Server [3]: https://pkg.go.dev/gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http#example-package @@ -205,6 +205,12 @@ If your framework is not supported, [create a new issue][7] in the Go repository [5]: https://pkg.go.dev/gopkg.in/DataDog/dd-trace-go.v1/contrib/labstack/echo.v4#example-package [6]: https://pkg.go.dev/gopkg.in/DataDog/dd-trace-go.v1/contrib/go-chi/chi.v5#example-package [7]: https://github.com/DataDog/dd-trace-go/issues/new?title=Missing%20appsec%20framework%20support +[8]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/google.golang.org/grpc/v2 +[9]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/net/http/v2 +[10]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/gorilla/mux/v2 +[11]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/labstack/echo.v4/v2 +[12]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/go-chi/chi.v5/v2 + {{< /programming-lang >}} {{< programming-lang lang="Node.js" >}} diff --git a/content/en/security/vulnerability_pipeline/_index.md b/content/en/security/automation_pipelines/_index.md similarity index 71% rename from content/en/security/vulnerability_pipeline/_index.md rename to content/en/security/automation_pipelines/_index.md index 9197fae6e8257..bd4744b02819f 100644 --- a/content/en/security/vulnerability_pipeline/_index.md +++ b/content/en/security/automation_pipelines/_index.md @@ -1,25 +1,27 @@ --- -title: Vulnerability Pipeline +title: Automation Pipelines +aliases: + - /security/vulnerability_pipeline further_reading: - - link: "/security/vulnerability_pipeline/mute" + - link: "/security/automation_pipelines/mute" tag: "Documentation" text: "Mute Rules" - - link: "/security/vulnerability_pipeline/security_inbox" + - link: "/security/automation_pipelines/security_inbox" tag: "Documentation" text: "Add to Security Inbox Rules" --- {{< callout btn_hidden="true">}} - Vulnerability Pipeline is in Preview. To enroll and access the automated rules, you must register for each set of rules separately: + Automation Pipelines is in Preview. To enroll and access the automated rules, you must register for each set of rules separately: {{< /callout >}} -Vulnerability Pipeline allows you to set up automated rules for newly discovered vulnerabilities, thus accelerating triage and remediation efforts at scale. +Automation Pipelines allows you to set up automated rules for newly discovered vulnerabilities, thus accelerating triage and remediation efforts at scale. ## Availability -Vulnerability Pipeline is available for: +Automation Pipelines is available for: - Misconfigurations - Attack paths @@ -28,10 +30,10 @@ Vulnerability Pipeline is available for: ## How it works -Vulnerability Pipeline operates through a rules-based system that allows you to automate how new vulnerabilities are managed. Here's how it works: +Automation Pipelines operates through a rules-based system that allows you to automate how new vulnerabilities are managed. Here's how it works: - **Rule configuration**: Each rule consists of multiple criteria, designed to filter vulnerabilities based on specific attributes. Within a rule, the combination of these criteria operates as a logical AND; however, if any criteria include multiple values, those values operate as a logical OR. This structure gives you the flexibility to create rules that precisely target your needs. 
-- **Rule matching**: Vulnerability Pipeline evaluates vulnerabilities against your rules in the order you've listed them. As each vulnerability is processed, Vulnerability Pipeline moves through the list until it finds a matching rule, at which point the specified action—such as muting non-urgent issues or highlighting critical threats—is triggered. +- **Rule matching**: Automation Pipelines evaluates vulnerabilities against your rules in the order you've listed them. As each vulnerability is processed, Automation Pipelines moves through the list until it finds a matching rule, at which point the specified action—such as muting non-urgent issues or highlighting critical threats—is triggered. ## Use cases diff --git a/content/en/security/vulnerability_pipeline/mute.md b/content/en/security/automation_pipelines/mute.md similarity index 87% rename from content/en/security/vulnerability_pipeline/mute.md rename to content/en/security/automation_pipelines/mute.md index d2ae790eb02f6..514dd167ca428 100644 --- a/content/en/security/vulnerability_pipeline/mute.md +++ b/content/en/security/automation_pipelines/mute.md @@ -1,16 +1,18 @@ --- title: Mute Rules +aliases: + - /security/vulnerability_pipeline/mute --- {{< callout url="https://www.datadoghq.com/product-preview/security-automation-pipelines/" >}} - Vulnerability Pipeline is in Preview. To enroll in the Preview for mute rules, click Request Access. -{{< /callout >}} + Automation Pipelines is in Preview. To enroll in the Preview for mute rules, click Request Access. +{{< /callout >}} Configure mute rules to streamline security alerts by automatically filtering out non-urgent findings. This approach helps reduce noise from known false positives and accepted risks, allowing you to focus on addressing the most critical threats. ## Create a mute rule -1. On the [Vulnerability Pipeline][2] page, click **Add a New Rule** and select **Mute**. +1. On the [Automation Pipelines][2] page, click **Add a New Rule** and select **Mute**. 1. Enter a descriptive name for the rule, for example, Cloud Infrastructure Anomaly Warnings. 1. Use the following boxes to configure the rule criteria: - **Any of these types**: The types of findings that the rule should check for. Available types include **Misconfiguration**, **Attack Path**, **Identity Risk**, and **API Security Finding**. diff --git a/content/en/security/vulnerability_pipeline/security_inbox.md b/content/en/security/automation_pipelines/security_inbox.md similarity index 84% rename from content/en/security/vulnerability_pipeline/security_inbox.md rename to content/en/security/automation_pipelines/security_inbox.md index 48b0b573b6a0e..1f6fe58f77236 100644 --- a/content/en/security/vulnerability_pipeline/security_inbox.md +++ b/content/en/security/automation_pipelines/security_inbox.md @@ -1,5 +1,7 @@ --- title: Add to Security Inbox Rules +aliases: + - /security/vulnerability_pipeline/security_inbox further_reading: - link: "/security/security_inbox" tag: "Documentation" @@ -7,14 +9,14 @@ further_reading: --- {{< callout url="https://www.datadoghq.com/product-preview/customize-your-security-inbox/" >}} - Vulnerability Pipeline is in Preview. To enroll in the Preview for Add to Security Inbox rules, click Request Access. + Automation Pipelines is in Preview. To enroll in the Preview for Add to Security Inbox rules, click Request Access. {{< /callout >}} Configure inbox rules to manage your Security Inbox effectively, ensuring only the most relevant security issues are highlighted. 
By customizing conditions, you can focus on critical concerns, prioritize key risks, support compliance, and bring attention to issues that might otherwise be overlooked. ## Create an inbox rule -1. On the [Vulnerability Pipeline][2] page, click **Add a New Rule** and select **Add to Security Inbox**. +1. On the [Automation Pipelines][2] page, click **Add a New Rule** and select **Add to Security Inbox**. 1. Enter a descriptive name for the rule, for example, Cloud Infrastructure Anomaly Warnings. 1. Use the following boxes to configure the rule criteria: - **Any of these types**: The types of findings that the rule should check for. Available types include **Misconfiguration**, **Attack Path**, **Identity Risk**, and **API Security Finding**. diff --git a/content/en/security/cloud_security_management/iac_scanning.md b/content/en/security/cloud_security_management/iac_scanning.md index 8ada7394e4511..7b4712f38006d 100644 --- a/content/en/security/cloud_security_management/iac_scanning.md +++ b/content/en/security/cloud_security_management/iac_scanning.md @@ -1,5 +1,9 @@ --- title: IaC Scanning +further_reading: + - link: "/security/cloud_security_management/setup/iac_scanning" + tag: "Documentation" + text: "Setting up IaC Scanning" --- {{< callout url="https://www.datadoghq.com/product-preview/iac-security/" >}} @@ -8,37 +12,14 @@ title: IaC Scanning Static Infrastructure as Code (IaC) scanning integrates with version control systems, such as GitHub, to detect misconfigurations in cloud resources defined by Terraform. The scanning results are displayed in two primary locations: within pull requests during code modifications and on the **Explorers** page within Cloud Security Management. +
Static IaC scanning supports GitHub for version control and Terraform for infrastructure as code.
+ {{< img src="security/csm/iac_scanning_explorer2.png" alt="CSM Explorers page displaying detected misconfigurations in cloud resources" width="100%">}} When you click on a finding, the side panel reveals additional details, including a short description of the IaC rule related to the finding and a preview of the offending code. {{< img src="security/csm/iac_scanning_finding.png" alt="Finding side panel highlighting undefined EBS volume encryption in Terraform code." width="100%">}} -## Supported providers - -- **Version control system**: GitHub -- **Infrastructure as code tool**: Terraform - -## Setup - -### Set up the GitHub integration - -Follow [the instructions][3] for creating a GitHub app for your organization. - -
To use IaC scanning, you must give the GitHub App Read & Write permissions for Contents and Pull Requests. These permissions can be applied to all or select repositories. -
- -### Enable IaC scanning for your repositories - -After you set up the GitHub integration, enable IaC scanning for the repositories in your GitHub account. - -1. On the [CSM Setup page][4], expand the **Source Code Integrations** section. -2. Click **Configure** for the GitHub account you want to configure. -3. To enable IaC scanning: - - All repositories: Toggle **Enable Infrastructure as Code (IaC) Scanning** to the on position. - - Single repository: Toggle the **IAC Scanning** option for the specific repository to the on position. +## Further reading -[1]: /security/cloud_security_management/misconfigurations -[2]: /security/cloud_security_management/identity_risks -[3]: /integrations/github/#link-a-repository-in-your-organization-or-personal-account -[4]: https://app.datadoghq.com/security/configuration/csm/setup +{{< partial name="whats-next/whats-next.html" >}} \ No newline at end of file diff --git a/content/en/security/cloud_security_management/misconfigurations/kspm.md b/content/en/security/cloud_security_management/misconfigurations/kspm.md index 09fad5a59d19d..b4060bfc9e4bb 100644 --- a/content/en/security/cloud_security_management/misconfigurations/kspm.md +++ b/content/en/security/cloud_security_management/misconfigurations/kspm.md @@ -22,14 +22,17 @@ To take full advantage of KSPM, you must install both the Datadog Agent and clou This allows Datadog to detect risks in your Kubernetes deployments for each of the following resource types: -| Resource Type | Install Method | Framework | -|--------------------------|-------------------|------------------| -| `aws_eks_cluster` | cloud integration | `cis-eks` | -| `aws_eks_worker_node` | Agent | `cis-eks` | -| `azure_aks_cluster` | cloud integration | `cis-aks` | -| `azure_aks_worker_node` | Agent | `cis-aks` | -| `kubernetes_master_node` | Agent | `cis-kubernetes` | -| `kubernetes_worker_node` | Agent | `cis-kubernetes` | +| Resource Type | Install Method | Framework | +|-----------------------------------|-------------------|------------------| +| `aws_eks_cluster` | Cloud integration | `cis-eks` | +| `aws_eks_worker_node` | Agent | `cis-eks` | +| `azure_aks_cluster` | Cloud integration | `cis-aks` | +| `azure_aks_worker_node` | Agent | `cis-aks` | +| `gcp_kubernetes_engine_cluster` | Cloud integration | `cis-gke` | +| `gcp_kubernetes_engine_node_pool` | Cloud integration | `cis-gke` | +| `gcp_gke_worker_node` | Agent | `cis-gke` | +| `kubernetes_master_node` | Agent | `cis-kubernetes` | +| `kubernetes_worker_node` | Agent | `cis-kubernetes` | ## Monitor risk across Kubernetes deployments diff --git a/content/en/security/cloud_security_management/setup/_index.md b/content/en/security/cloud_security_management/setup/_index.md index f20bc87d11ff9..c331f1f939f3f 100644 --- a/content/en/security/cloud_security_management/setup/_index.md +++ b/content/en/security/cloud_security_management/setup/_index.md @@ -35,6 +35,7 @@ To get started with Cloud Security Management (CSM), review the following: - [Deploy the Agent for additional coverage](#deploy-the-agent-for-additional-coverage) - [Enable additional features](#enable-additional-features) - [AWS CloudTrail Logs](#aws-cloudtrail-logs) + - [IaC scanning](#iac-scanning) - [IaC remediation](#iac-remediation) - [Deploy via cloud integrations](#deploy-via-cloud-integrations) - [Disable CSM](#disable-csm) @@ -109,11 +110,15 @@ For broader coverage and additional functionalities, deploy the Datadog Agent to ### AWS CloudTrail Logs -AWS CloudTrail Logs allows you to get the most out of 
[CSM Identity Risks][6]. With AWS CloudTrail Logs, you gain additional insights into the actual usage of cloud resources, helping you identify users and roles with significant gaps between provisioned and utilized permissions. For more information, see [Setting up AWS CloudTrail Logs for Cloud Security Management][4]. +Maximize the benefits of [CSM Identity Risks][6] with AWS CloudTrail Logs. Gain deeper insights into cloud resource usage, identifying users and roles with significant gaps between provisioned and utilized permissions. For more information, check out [Setting up AWS CloudTrail Logs for Cloud Security Management][4]. + +### IaC scanning + +Integrate Infrastructure as Code (IaC) scanning with GitHub to detect misconfigurations in Terraform-defined cloud resources. For more information, see [Setting up IaC Scanning for Cloud Security Management][10]. ### IaC remediation -With Infrastructure as Code (IaC) remediation, you can use Terraform to open a pull request in GitHub, applying code changes that fix a misconfiguration or identity risk. For more information, see [Setting up IaC Remediation for Cloud Security Management][5]. +Use IaC remediation with Terraform to create pull requests in GitHub, applying code changes that fix misconfigurations and mitigate identity risks. For more information, see [Setting up IaC Remediation for Cloud Security Management][5]. ### Deploy via cloud integrations @@ -138,4 +143,5 @@ For information on disabling CSM, see the following: [6]: /security/cloud_security_management/identity_risks [7]: /security/cloud_security_management/setup/cloud_accounts [8]: /security/cloud_security_management/troubleshooting/vulnerabilities/#disable-csm-vulnerabilities -[9]: /security/cloud_security_management/troubleshooting/threats/#disable-csm-threats \ No newline at end of file +[9]: /security/cloud_security_management/troubleshooting/threats/#disable-csm-threats +[10]: /security/cloud_security_management/setup/iac_scanning \ No newline at end of file diff --git a/content/en/security/cloud_security_management/setup/agentless_scanning/azure_resource_manager.md b/content/en/security/cloud_security_management/setup/agentless_scanning/azure_resource_manager.md index d48e2e1a2a3c6..62fdd9ca18bc5 100644 --- a/content/en/security/cloud_security_management/setup/agentless_scanning/azure_resource_manager.md +++ b/content/en/security/cloud_security_management/setup/agentless_scanning/azure_resource_manager.md @@ -42,7 +42,7 @@ Follow the instructions for setting up the [Datadog Azure integration][1]. 1. On the [Cloud Security Management Setup][3] page, click **Cloud Integrations** > **Azure**. 1. Locate your subscription's tenant, expand the list of subscriptions, and identify the subscription for which you want to disable Agentless Scanning. -1. Click the **Edit** button {{< img src="security/csm/setup/edit-button.png" inline="true" style="width:24px;">}} and toggle **Vulnerability Scanning** to the off position. +1. Click the **Edit** button ({{< img src="security/csm/setup/edit-button.png" inline="true" style="width:24px;">}}) and toggle **Vulnerability Scanning** to the off position. 1. Click **Done**. 
## Uninstall with Azure Resource Manager diff --git a/content/en/security/cloud_security_management/setup/agentless_scanning/cloudformation.md b/content/en/security/cloud_security_management/setup/agentless_scanning/cloudformation.md index 6850371d0bcc6..0a260e6ca9ba6 100644 --- a/content/en/security/cloud_security_management/setup/agentless_scanning/cloudformation.md +++ b/content/en/security/cloud_security_management/setup/agentless_scanning/cloudformation.md @@ -38,7 +38,7 @@ If you've already [set up Cloud Security Management][3] and want to add a new cl {{% tab "Existing AWS account" %}} 1. On the [Cloud Security Management Setup][1] page, click **Cloud Integrations** > **AWS**. -1. Click the **Edit** button {{< img src="security/csm/setup/edit-button.png" inline="true" style="width:24px;">}} for the AWS account where you want to deploy the Agentless scanner. +1. Click the **Edit** button ({{< img src="security/csm/setup/edit-button.png" inline="true" style="width:24px;">}}) for the AWS account where you want to deploy the Agentless scanner. 1. Verify that **Enable Resource Scanning** is toggled on. If it isn't, switch the **Enable Resource Scanning** toggle to the on position and complete Steps 3-7 in [New AWS Account][2]. 1. In the **Agentless Scanning** section, toggle **Host Vulnerability Scanning**, **Container Vulnerability Scanning**, **Lambda Vulnerability Scanning**, and **Data Security Scanning** to the on position. 1. Click **Done**. @@ -65,7 +65,7 @@ Datadog recommends updating the CloudFormation stack regularly, so you can get a ## Disable Agentless Scanning 1. On the [Cloud Security Management Setup][3] page, click **Cloud Integrations** > **AWS**. -1. To disable Agentless Scanning for an account, click the **Edit** button {{< img src="security/csm/setup/edit-button.png" inline="true" style="width:24px;">}} and toggle the **Agentless Scanning** section to the off position. +1. To disable Agentless Scanning for an account, click the **Edit** button ({{< img src="security/csm/setup/edit-button.png" inline="true" style="width:24px;">}}) and toggle the **Agentless Scanning** section to the off position. 1. Click **Done**. ## Uninstall with CloudFormation diff --git a/content/en/security/cloud_security_management/setup/agentless_scanning/quick_start.md b/content/en/security/cloud_security_management/setup/agentless_scanning/quick_start.md index c8c95c9857bd6..f65258184ee97 100644 --- a/content/en/security/cloud_security_management/setup/agentless_scanning/quick_start.md +++ b/content/en/security/cloud_security_management/setup/agentless_scanning/quick_start.md @@ -47,7 +47,7 @@ Datadog recommends updating the CloudFormation stack regularly, so you can get a ## Disable Agentless Scanning 1. On the [Cloud Security Management Setup][10] page, click **Cloud Integrations** > **AWS**. -1. To disable Agentless Scanning for an account, click the **Edit** button {{< img src="security/csm/setup/edit-button.png" inline="true" style="width:24px;">}} and toggle the **Agentless Scanning** section to the off position. +1. To disable Agentless Scanning for an account, click the **Edit** button ({{< img src="security/csm/setup/edit-button.png" inline="true" style="width:24px;">}}) and toggle the **Agentless Scanning** section to the off position. 1. Click **Done**. 
## Uninstall Agentless Scanning diff --git a/content/en/security/cloud_security_management/setup/agentless_scanning/terraform.md b/content/en/security/cloud_security_management/setup/agentless_scanning/terraform.md index b9218c6db07f0..d1a613f50881c 100644 --- a/content/en/security/cloud_security_management/setup/agentless_scanning/terraform.md +++ b/content/en/security/cloud_security_management/setup/agentless_scanning/terraform.md @@ -37,7 +37,7 @@ If you've already [set up Cloud Security Management][4] and want to add a new cl {{% tab "Existing AWS account" %}} 1. On the [Cloud Security Management Setup][1] page, click **Cloud Integrations > AWS**. -1. Click the **Edit scanning** button {{< img src="security/csm/setup/edit-button.png" inline="true" style="width:24px;">}} for the AWS account where you want to deploy the Agentless scanner. +1. Click the **Edit scanning** button ({{< img src="security/csm/setup/edit-button.png" inline="true" style="width:24px;">}}) for the AWS account where you want to deploy the Agentless scanner. 1. **Enable Resource Scanning** should already be toggled on. If it isn't, toggle **Enable Resource Scanning** to the on position. 1. In the **How would you like to set up Agentless Scanning?** section, select **Terraform**. 1. Follow the instructions for installing the [Datadog Agentless Scanner module][2]. @@ -72,7 +72,7 @@ If you've already [set up Cloud Security Management][4] and want to add a new cl ## Disable Agentless Scanning 1. On the [Cloud Security Management Setup][4] page, click **Cloud Integrations**, and then expand the **AWS** or **Azure** section. -1. To disable Agentless Scanning for an account, click the **Edit** button {{< img src="security/csm/setup/edit-button.png" inline="true" style="width:24px;">}} and toggle **Vulnerability Scanning** to the off position. +1. To disable Agentless Scanning for an account, click the **Edit** button ({{< img src="security/csm/setup/edit-button.png" inline="true" style="width:24px;">}}) and toggle **Vulnerability Scanning** to the off position. 1. Click **Done**. ## Uninstall with Terraform diff --git a/content/en/security/cloud_security_management/setup/cloud_integrations.md b/content/en/security/cloud_security_management/setup/cloud_integrations.md index 45423939ebcb3..589cf66da25a0 100644 --- a/content/en/security/cloud_security_management/setup/cloud_integrations.md +++ b/content/en/security/cloud_security_management/setup/cloud_integrations.md @@ -37,62 +37,38 @@ To enable resource scanning for your cloud accounts, you must first set up the i ## Disable resource scanning -To disable resource scanning for your cloud accounts, navigate to either the [**CSM Setup**][1] page or the cloud account integration page. Disabling resource scanning does not affect your ability to access historical findings. You can still review data from the past 15 months. +
You can access historical findings from the past 15 months even if resource scanning is disabled.
{{< tabs >}} {{% tab "AWS" %}} -### CSM Setup page - 1. On the [**Cloud Security Management Setup**][1] page, click **Cloud Integrations**. 1. Expand the **AWS** section. -1. To stop resource collection for an account, click the Edit button and switch the **Enable Resource Scanning** toggle to the off position. +1. To stop resource collection for an account, click the **Edit** button ({{< img src="security/csm/setup/edit-button.png" inline="true" style="width:24px;">}}) and switch the **Enable Resource Scanning** toggle to the off position. 1. Click **Done**. -### Amazon Web Services integration page - -1. On the [**Amazon Web Services Integration**][2] page, select an AWS account. -1. On the **Resource Collection** tab, clear the **Enable Cloud Security Management** checkbox. -1. Click Save. - [1]: https://app.datadoghq.com/security/configuration/csm/setup [2]: https://app.datadoghq.com/integrations/amazon-web-services {{% /tab %}} {{% tab "Azure" %}} -### CSM Setup page - 1. On the [**Cloud Security Management Setup**][1] page, click **Cloud Integrations**. 1. Expand the **Azure** section. 1. To stop resource collection for a subscription, switch the **Resource Scanning** toggle to the off position. 1. Click **Done**. -### Azure integration page - -1. On the [**Azure Integration**][2] page, select an app registration. -1. On the **Resource Collection** tab, clear the **Enable Cloud Security Management** checkbox. -1. Click Save. - [1]: https://app.datadoghq.com/security/configuration/csm/setup [2]: https://app.datadoghq.com/integrations/azure {{% /tab %}} {{% tab "Google Cloud" %}} -### CSM Setup page - 1. On the [**Cloud Security Management Setup**][1] page, click **Cloud Integrations**. 1. Expand the **GCP** section. 1. To stop resource collection for a project, switch the **Resource Scanning** toggle to the off position. 1. Click **Done**. -### Google Cloud Platform integration page - -1. On the [**Google Cloud Platform Integration**][2] page, select a Google Cloud account. -1. On the **Resource Collection** tab, clear the **Enable Cloud Security Management** checkbox. -1. Click **Save**. - [1]: https://app.datadoghq.com/security/configuration/csm/setup [2]: https://app.datadoghq.com/integrations/google-cloud-platform diff --git a/content/en/security/cloud_security_management/setup/iac_remediation.md b/content/en/security/cloud_security_management/setup/iac_remediation.md index ca6559872790b..91b3836dac768 100644 --- a/content/en/security/cloud_security_management/setup/iac_remediation.md +++ b/content/en/security/cloud_security_management/setup/iac_remediation.md @@ -16,6 +16,8 @@ further_reading: Use the following instructions to enable Infrastructure as Code (IaC) remediation for Cloud Security Management (CSM). IaC remediation is available for [CSM Misconfigurations][1] and [CSM Identity Risks][2]. +
Static IaC remediation supports GitHub for version control and Terraform for infrastructure as code.
+ ## Set up the GitHub integration Follow [the instructions][3] for creating a GitHub app for your organization. diff --git a/content/en/security/cloud_security_management/setup/iac_scanning.md b/content/en/security/cloud_security_management/setup/iac_scanning.md new file mode 100644 index 0000000000000..07cdc54ec0a08 --- /dev/null +++ b/content/en/security/cloud_security_management/setup/iac_scanning.md @@ -0,0 +1,47 @@ +--- +title: Setting up IaC Scanning for Cloud Security Management +further_reading: + - link: "/security/cloud_security_management/setup" + tag: "Documentation" + text: "Setting up Cloud Security Management" + - link: "/security/cloud_security_management/misconfigurations" + tag: "Documentation" + text: "CSM Misconfigurations" + - link: "/security/cloud_security_management/identity_risks" + tag: "Guide" + text: "CSM Identity Risks" +--- + +{{< callout url="https://www.datadoghq.com/product-preview/iac-security/" >}} + Static Infrastructure as Code (IaC) scanning is in Preview. To request access, complete the form. +{{< /callout >}} + +Use the following instructions to enable Infrastructure as Code (IaC) scanning for Cloud Security Management (CSM). IaC scanning is available for [CSM Misconfigurations][1] and [CSM Identity Risks][2]. + +
Static IaC scanning supports GitHub for version control and Terraform for infrastructure as code.
+ +## Set up the GitHub integration + +Follow [the instructions][3] for creating a GitHub app for your organization. + +
To use IaC scanning, you must give the GitHub App Read & Write permissions for Contents and Pull Requests. These permissions can be applied to all or select repositories.
+
+ +## Enable IaC scanning for your repositories + +After you set up the GitHub integration, enable IaC scanning for the repositories in your GitHub account. + +1. On the [CSM Setup page][4], expand the **Source Code Integrations** section. +2. Click **Configure** for the GitHub account you want to configure. +3. To enable IaC: + - All repositories: Toggle **Enable Infrastructure as Code (IaC) Scanning** to the on position. + - Single repository: Toggle the **IAC Scanning** option for the specific repository to the on position. + +## Further reading + +{{< partial name="whats-next/whats-next.html" >}} + +[1]: /security/cloud_security_management/misconfigurations +[2]: /security/cloud_security_management/identity_risks +[3]: /integrations/github/#link-a-repository-in-your-organization-or-personal-account +[4]: https://app.datadoghq.com/security/configuration/csm/setup diff --git a/content/en/security/cloud_security_management/setup/without_infrastructure_monitoring.md b/content/en/security/cloud_security_management/setup/without_infrastructure_monitoring.md index ba58d01ba715e..90f05937f82c9 100644 --- a/content/en/security/cloud_security_management/setup/without_infrastructure_monitoring.md +++ b/content/en/security/cloud_security_management/setup/without_infrastructure_monitoring.md @@ -2,7 +2,7 @@ title: Setting Up CSM without Infrastructure Monitoring --- -In addition to setting up CSM with or without an Agent, you can also set it up without Infrastructure Monitoring. +In addition to setting up Cloud Security Management (CSM) with or without an Agent, you can also set it up without Infrastructure Monitoring. ## Set up CSM on your AWS account @@ -11,8 +11,9 @@ In addition to setting up CSM with or without an Agent, you can also set it up w If you don't see the required account, add it by clicking **Add AWS Account(s)** and following the onscreen prompts. 1. To turn off infrastructure monitoring on the selected account, under the account number, navigate to the **Metric Collection** tab, then click the **disable metric collection** link. Then, click **Disable Metric Collection** to confirm. -1. To turn on CSM, on the **Resource Collection** tab, turn on the **Enable Resource Collection** toggle, and select the **Enable Cloud Security Management** checkbox. -1. Click **Save**. +1. On the **Resource Collection** tab, click **Enable** next to Cloud Security Management. You are redirected to the Cloud Security Management Setup page, and a setup dialog automatically opens for the selected account. +1. On the setup dialog, switch the **Enable Resource Scanning** toggle to the on position. +1. Click **Done** to complete the setup. **Note**: In your CSM settings, set up [resource evaluation filters][1] to limit the number of hosts you need security on. @@ -23,8 +24,9 @@ In addition to setting up CSM with or without an Agent, you can also set it up w If you don't see the required client ID, add it by clicking **Add New App Registration** and following the onscreen prompts. 1. To turn off infrastructure monitoring on the selected account, under the client ID, navigate to the **Metric Collection** tab, then turn off the **Enable Metric Collection** toggle. -1. To turn on CSM, on the **Resource Collection** tab, turn on the **Enable Resource Collection** toggle, and select the **Enable Cloud Security Management** checkbox. -1. Click **Save**. +1. On the **Resource Collection** tab, click **Enable** next to Cloud Security Management. 
You are redirected to the Cloud Security Management Setup page, which automatically scrolls to the selected Azure subscription in the Cloud Integrations section. +1. Switch the **Resource Scanning** toggle to the on position. +1. Click **Done** to complete the setup. **Note**: In your CSM settings, set up [resource evaluation filters][1] to limit the number of hosts you need security on. @@ -35,8 +37,9 @@ In addition to setting up CSM with or without an Agent, you can also set it up w If you don't see the required account, add it by clicking **Add GCP Account** and following the onscreen prompts. 1. To turn off infrastructure monitoring on the selected account, under the account name, navigate to the **Metric Collection** tab. Then, above the Metric Collection table, click **Disable All**. -1. To turn on CSM, on the **Resource Collection** tab, turn on the **Enable Resource Collection** toggle, and select the **Enable Cloud Security Management** checkbox. -1. Click **Save**. +1. On the **Resource Collection** tab, click **Enable** next to Cloud Security Management. You are redirected to the Cloud Security Management Setup page, which automatically scrolls to the selected Google Cloud Platform project in the Cloud Integrations section. +1. Switch the **Resource Scanning** toggle to the on position. +1. Click **Done** to complete the setup. **Note**: In your CSM settings, set up [resource evaluation filters][1] to limit the number of hosts you need security on. diff --git a/content/en/security/security_inbox.md b/content/en/security/security_inbox.md index 304584d3dd29e..c826c6a3835c1 100644 --- a/content/en/security/security_inbox.md +++ b/content/en/security/security_inbox.md @@ -70,16 +70,16 @@ Key features include: ## Customize Security Inbox to highlight crucial issues {{< callout url="https://www.datadoghq.com/product-preview/customize-your-security-inbox/" >}} - Vulnerability Pipeline is in Preview. To enroll in the Preview for Add to Security Inbox rules, click Request Access. + Automation Pipelines is in Preview. To enroll in the Preview for Add to Security Inbox rules, click Request Access. {{< /callout >}} -Vulnerability Pipeline enables you to configure rules that customize your Security Inbox, allowing you to highlight issues that are critical to your organization. By setting up these automated rules, you can streamline the management of newly discovered vulnerabilities, enhancing triage and remediation efforts at scale. Leveraging both the Vulnerability Pipeline and Add to Security Inbox rules, you can optimize your security operations in the following ways: +Automation Pipelines enables you to configure rules that customize your Security Inbox, allowing you to highlight issues that are critical to your organization. By setting up these automated rules, you can streamline the management of newly discovered vulnerabilities, enhancing triage and remediation efforts at scale. Leveraging both the Automation Pipelines and Add to Security Inbox rules, you can optimize your security operations in the following ways: - **Resurface issues not captured by default**: Highlight issues that might be missed by default or custom detection rules, ensuring no critical issue is overlooked. - **Strengthen compliance and address key system concerns**: Address concerns affecting regulatory compliance or important business systems, regardless of severity. - **Prioritize current risks**: Focus on immediate threats, such as identity risks after an incident or industry-wide vulnerabilities. 
-For more information, see [Vulnerability Pipeline][11] and [Add to Security Inbox Rules][12]. +For more information, see [Automation Pipelines][11] and [Add to Security Inbox Rules][12]. ## Further Reading @@ -95,5 +95,5 @@ For more information, see [Vulnerability Pipeline][11] and [Add to Security Inbo [8]: https://www.exploit-db.com/ [9]: https://nvd.nist.gov/ [10]: /security/cloud_security_management/severity_scoring/#csm-severity-scoring-framework -[11]: /security/vulnerability_pipeline/ -[12]: /security/vulnerability_pipeline/security_inbox \ No newline at end of file +[11]: /security/automation_pipelines/ +[12]: /security/automation_pipelines/security_inbox \ No newline at end of file diff --git a/content/en/serverless/aws_lambda/installation/go.md b/content/en/serverless/aws_lambda/installation/go.md index 87ba99ad208cd..cf6bbd73130de 100644 --- a/content/en/serverless/aws_lambda/installation/go.md +++ b/content/en/serverless/aws_lambda/installation/go.md @@ -125,11 +125,13 @@ import ( "net/http" "time" - ddlambda "github.com/DataDog/datadog-lambda-go" - "github.com/aws/aws-lambda-go/events" - "github.com/aws/aws-lambda-go/lambda" - httptrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http" - "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" + ddlambda "github.com/DataDog/datadog-lambda-go" + "github.com/aws/aws-lambda-go/events" + "github.com/aws/aws-lambda-go/lambda" + httptrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http" // 1.x + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" // 1.x + // httptrace "github.com/DataDog/dd-trace-go/contrib/net/http/v2" // 2.x + // "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" // 2.x ) func main() { diff --git a/content/en/serverless/azure_app_services/azure_app_services_linux.md b/content/en/serverless/azure_app_services/azure_app_services_linux.md index d63ece3fa8fe8..d2dff15705931 100644 --- a/content/en/serverless/azure_app_services/azure_app_services_linux.md +++ b/content/en/serverless/azure_app_services/azure_app_services_linux.md @@ -59,7 +59,7 @@ Set these values in the `DD_START_APP` environment variable. Examples below are Go to **General settings** and add the following to the **Startup Command** field: ``` -curl -s https://raw.githubusercontent.com/DataDog/datadog-aas-linux/v1.10.13/datadog_wrapper | bash +curl -s https://raw.githubusercontent.com/DataDog/datadog-aas-linux/v1.10.14/datadog_wrapper | bash ``` {{< img src="serverless/azure_app_service/startup-command-1.jpeg" alt="Azure App Service Configuration: the Stack settings, under the Configuration section of Settings in the Azure UI. Underneath the stack, major version, and minor version fields is a 'Startup Command' field that is populated by the above curl command." style="width:100%;" >}} diff --git a/content/en/serverless/google_cloud_run/_index.md b/content/en/serverless/google_cloud_run/_index.md index 90a1e2bf21492..bb576c6660569 100644 --- a/content/en/serverless/google_cloud_run/_index.md +++ b/content/en/serverless/google_cloud_run/_index.md @@ -10,155 +10,855 @@ further_reading: ## Overview -Google Cloud Run is a fully managed serverless platform for deploying and scaling container-based applications. Datadog provides monitoring and log collection for Cloud Run through the [Google Cloud integration][1]. Datadog also provides a solution for instrumenting your Cloud Run applications with a purpose-built Agent to enable tracing, custom metrics, and direct log collection. 
+Google Cloud Run is a fully managed serverless platform for deploying and scaling container-based applications. Datadog provides monitoring and log collection for Cloud Run through the [Google Cloud integration][1]. -### Prerequisites +
To instrument your Google Cloud Run applications with serverless-init, see Instrument Google Cloud Run with serverless-init.
-Make sure you have a [Datadog API Key][6] and are using a programming language [supported by a Datadog tracing library][2]. +## Setup -## Instrument your application +### Application -You can instrument your application in one of two ways: [Dockerfile](#dockerfile) or [buildpack](#buildpack). +{{< tabs >}} +{{% tab "Node.js" %}} +#### Tracing -### Dockerfile +In your main application, add the `dd-trace-js` library. See [Tracing Node.js applications][1] for instructions. -Datadog publishes new releases of the `serverless-init` container image to Google’s gcr.io, Azure ACR, AWS’ ECR, and on Docker Hub: +Set `ENV NODE_OPTIONS="--require dd-trace/init"`. This specifies that the `dd-trace/init` module is required when the Node.js process starts. -| dockerhub.io | gcr.io | public.ecr.aws | datadoghq.azurecr.io | -| ----------------------- | -------------------------------- | -------------------------------------- | ------------------------------------ | -| datadog/serverless-init | gcr.io/datadoghq/serverless-init | public.ecr.aws/datadog/serverless-init | datadoghq.azurecr.io/serverless-init | +#### Metrics +The tracing library also collects custom metrics. See the [code examples][2]. -Images are tagged based on semantic versioning, with each new version receiving three relevant tags: +#### Logs +The Datadog sidecar collects logs through a shared volume. To forward logs from your main container to the sidecar, configure your application to write all logs to a location such as `shared-volume/logs/*.log` using the steps below. You must follow the setup in the GCP UI to add the environment variable `DD_SERVERLESS_LOG_PATH` and a shared Volume Mount to both the main and sidecar container. If you decide to deploy using YAML or Terraform, the environment variables, health check, and volume mount are already added. -* `1`, `1-alpine`: use these to track the latest minor releases, without breaking changes -* `1.x.x`, `1.x.x-alpine`: use these to pin to a precise version of the library -* `latest`, `latest-alpine`: use these to follow the latest version release, which may include breaking changes +To set up logging in your application, see [Node.js Log Collection][3]. To set up trace log correlation, see [Correlating Node.js Logs and Traces][4]. -## How `serverless-init` works +[1]: /tracing/trace_collection/automatic_instrumentation/dd_libraries/nodejs/#getting-started +[2]: /metrics/custom_metrics/dogstatsd_metrics_submission/#code-examples +[3]: /logs/log_collection/nodejs/?tab=winston30 +[4]: /tracing/other_telemetry/connect_logs_and_traces/nodejs -The `serverless-init` application wraps your process and executes it as a subprocess. It starts a DogStatsD listener for metrics and a Trace Agent listener for traces. It collects logs by wrapping the stdout/stderr streams of your application. After bootstrapping, serverless-init then launches your command as a subprocess. +{{% /tab %}} +{{% tab "Python" %}} +#### Tracing -To get full instrumentation, ensure you are calling `datadog-init` as the first command that runs inside your Docker container. You can do this through by setting it as the entrypoint, or by setting it as the first argument in CMD. +In your main application, add the `dd-trace-py` library. See [Tracing Python Applications][1] for instructions. You can also use [Tutorial - Enabling Tracing for a Python Application and Datadog Agent in Containers][5]. 
-{{< programming-lang-wrapper langs="nodejs,python,java,go,dotnet,ruby,php" >}} -{{< programming-lang lang="nodejs" >}} +#### Metrics +The tracing library also collects custom metrics. See the [code examples][2]. -{{% svl-init-nodejs %}} +#### Logs +The Datadog sidecar collects logs through a shared volume. To forward logs from your main container to the sidecar, configure your application to write all logs to a location such as `shared-volume/logs/*.log` using the steps below. You must follow the setup in the GCP UI to add the environment variable `DD_SERVERLESS_LOG_PATH` and a shared Volume Mount to both the main and sidecar container. If you decide to deploy using YAML or Terraform, the environment variables, health check, and volume mount are already added. -{{< /programming-lang >}} -{{< programming-lang lang="python" >}} +To set up logging in your application, see [Python Log Collection][3]. [Python Logging Best Practices][6] can also be helpful. To set up trace log correlation, see [Correlating Python Logs and Traces][4]. -{{% svl-init-python %}} +[1]: /tracing/trace_collection/automatic_instrumentation/dd_libraries/python +[2]: /metrics/custom_metrics/dogstatsd_metrics_submission/#code-examples +[3]: /logs/log_collection/python +[4]: /tracing/other_telemetry/connect_logs_and_traces/python +[5]: /tracing/guide/tutorial-enable-python-containers/ +[6]: https://www.datadoghq.com/blog/python-logging-best-practices/ -{{< /programming-lang >}} -{{< programming-lang lang="java" >}} +{{% /tab %}} +{{% tab "Java" %}} +#### Tracing -{{% svl-init-java %}} +In your main application, add the `dd-trace-java` library. Follow the instructions in [Tracing Java Applications][1] or use the following example Dockerfile to add and start the tracing library with automatic instrumentation: -{{< /programming-lang >}} -{{< programming-lang lang="go" >}} +```dockerfile +FROM eclipse-temurin:17-jre-jammy +WORKDIR /app +COPY target/cloudrun-java-1.jar cloudrun-java-1.jar -{{% svl-init-go %}} -{{< /programming-lang >}} -{{< programming-lang lang="dotnet" >}} +# Add the Datadog tracer +ADD 'https://dtdg.co/latest-java-tracer' dd-java-agent.jar -{{% svl-init-dotnet %}} -{{< /programming-lang >}} -{{< programming-lang lang="ruby" >}} +EXPOSE 8080 -{{% svl-init-ruby %}} -{{< /programming-lang >}} -{{< programming-lang lang="php" >}} +# Start the Datadog tracer with the javaagent argument +ENTRYPOINT [ "java", "-javaagent:dd-java-agent.jar", "-jar", "cloudrun-java-1.jar" ] +``` + +#### Metrics +To collect custom metrics, [install the Java DogStatsD client][2]. + +#### Logs +The Datadog sidecar collects logs through a shared volume. To forward logs from your main container to the sidecar, configure your application to write all logs to a location such as `shared-volume/logs/*.log` using the steps below. You must follow the setup in the GCP UI to add the environment variable `DD_SERVERLESS_LOG_PATH` and a shared Volume Mount to both the main and sidecar container. If you decide to deploy using YAML or Terraform, the environment variables, health check, and volume mount are already added. + +To set up logging in your application, see [Java Log Collection][3]. To set up trace log correlation, see [Correlating Java Logs and Traces][4]. 
+ +[1]: /tracing/trace_collection/automatic_instrumentation/dd_libraries/java/#getting-started +[2]: /developers/dogstatsd/?tab=hostagent&code-lang=java#install-the-dogstatsd-client +[3]: /logs/log_collection/java/?tab=winston30 +[4]: /tracing/other_telemetry/connect_logs_and_traces/java + +{{% /tab %}} +{{% tab "Go" %}} +#### Tracing + +In your main application, add the `dd-trace-go` library. See [Tracing Go Applications][1] for instructions. -{{% svl-init-php %}} +#### Metrics +The tracing library also collects custom metrics. See the [code examples][2]. -{{< /programming-lang >}} -{{< /programming-lang-wrapper >}} +#### Logs +The Datadog sidecar collects logs through a shared volume. To forward logs from your main container to the sidecar, configure your application to write all logs to a location such as `shared-volume/logs/*.log` using the steps below. You must follow the setup in the GCP UI to add the environment variable `DD_SERVERLESS_LOG_PATH` and a shared Volume Mount to both the main and sidecar container. If you decide to deploy using YAML or Terraform, the environment variables, health check, and volume mount are already added. -### Buildpack +To set up logging in your application, see [Go Log Collection][3]. To set up trace log correlation, see [Correlating Go Logs and Traces][4]. -[`Pack Buildpacks`][3] provide a convenient way to package your container without using a Dockerfile. +[1]: /tracing/trace_collection/automatic_instrumentation/dd_libraries/go/ +[2]: /metrics/custom_metrics/dogstatsd_metrics_submission/#code-examples +[3]: /logs/log_collection/go +[4]: /tracing/other_telemetry/connect_logs_and_traces/go +{{% /tab %}} +{{% tab ".NET" %}} +#### Tracing -First, manually install your tracer: -- [Node.JS][14] -- [Python][13] -- [Java][15] -- [Go][12] -- [.NET][18] -- [Ruby][16] -- [PHP][17] +In your main application, add the .NET tracing library. See [Tracing .NET Applications][1] for instructions. -Then, build your application by running the following command: +Example Dockerfile: -```shell -pack build --builder=gcr.io/buildpacks/builder \ ---buildpack from=builder \ ---buildpack datadog/serverless-buildpack:latest \ -gcr.io/YOUR_PROJECT/YOUR_APP_NAME +```dockerfile +FROM mcr.microsoft.com/dotnet/aspnet:8.0-jammy +WORKDIR /app +COPY ./bin/Release/net8.0/publish /app + +ADD https://github.com/DataDog/dd-trace-dotnet/releases/download/v2.56.0/datadog-dotnet-apm_2.56.0_amd64.deb /opt/datadog/datadog-dotnet-apm_2.56.0_amd64.deb +RUN dpkg -i /opt/datadog/datadog-dotnet-apm_2.56.0_amd64.deb +RUN mkdir -p /shared-volume/logs/ + +ENV CORECLR_ENABLE_PROFILING=1 +ENV CORECLR_PROFILER={846F5F1C-F9AE-4B07-969E-05C26BC060D8} +ENV CORECLR_PROFILER_PATH=/opt/datadog/Datadog.Trace.ClrProfiler.Native.so +ENV DD_DOTNET_TRACER_HOME=/opt/datadog/ + +ENV DD_TRACE_DEBUG=true + +ENTRYPOINT ["dotnet", "dotnet.dll"] ``` -**Note**: Buildpack instrumentation is not compatible with Alpine images +#### Metrics +The tracing library also collects custom metrics. See the [code examples][2]. + +#### Logs +The Datadog sidecar collects logs through a shared volume. To forward logs from your main container to the sidecar, configure your application to write all logs to a location such as `shared-volume/logs/*.log` using the steps below. You must follow the setup in the GCP UI to add the environment variable `DD_SERVERLESS_LOG_PATH` and a shared Volume Mount to both the main and sidecar container. 
If you decide to deploy using YAML or Terraform, the environment variables, health check, and volume mount are already added. + +To set up logging in your application, see [C# Log Collection][3]. To set up trace log correlation, see [Correlating .NET Logs and Traces][4]. + +[1]: /tracing/trace_collection/automatic_instrumentation/dd_libraries/dotnet-core/?tab=linux#enable-the-tracer-for-your-service +[2]: https://www.datadoghq.com/blog/statsd-for-net-dogstatsd/ +[3]: /log_collection/csharp/?tab=serilog +[4]: /tracing/other_telemetry/connect_logs_and_traces/dotnet/?tab=serilog +{{% /tab %}} +{{% tab "PHP" %}} +In your main application, add the `dd-trace-php` library. See [Tracing PHP Applications][1] for instructions. + +#### Metrics +The tracing library also collects custom metrics. See the [code examples][2]. + +#### Logs +The Datadog sidecar collects logs through a shared volume. To forward logs from your main container to the sidecar, configure your application to write all logs to a location such as `shared-volume/logs/*.log` using the steps below. You must follow the setup in the GCP UI to add the environment variable `DD_SERVERLESS_LOG_PATH` and a shared Volume Mount to both the main and sidecar container. If you decide to deploy using YAML or Terraform, the environment variables, health check, and volume mount are already added. + +To set up logging in your application, see [PHP Log Collection][3]. To set up trace log correlation, see [Correlating PHP Logs and Traces][4]. + +[1]: /tracing/trace_collection/automatic_instrumentation/dd_libraries/php/ +[2]: /metrics/custom_metrics/dogstatsd_metrics_submission/#code-examples +[3]: /logs/log_collection/php +[4]: /tracing/other_telemetry/connect_logs_and_traces/php +{{% /tab %}} +{{< /tabs >}} + +### Containers +{{< tabs >}} +{{% tab "GCR UI" %}} + +#### Sidecar container + +1. In Cloud Run, select **Edit & Deploy New Revision**. +1. At the bottom of the page, select **Add Container**. +1. For **Container image URL**, select `gcr.io/datadoghq/serverless-init:latest`. +1. Go to **Volume Mounts** and set up a volume mount for logs. Ensure that the mount path matches your application's write location. For example: + {{< img src="serverless/gcr/volume_mount.png" width="80%" alt="Volume Mounts tab. Under Mounted volumes, Volume Mount 1. For Name 1, 'shared-logs (In-Memory)' is selected. For Mount path 1, '/shared-volume' is selected.">}} +1. Go to **Settings** and add a startup check. + - **Select health check type**: Startup check + - **Select probe type**: TCP + - **Port**: Enter a port number. Make note of this, as it is used in the next step. +1. Go to **Variables & Secrets** and add the following environment variables as name-value pairs: + - `DD_SERVICE`: A name for your service. For example, `gcr-sidecar-test`. + - `DD_ENV`: A name for your environment. For example, `dev`. + - `DD_SERVERLESS_LOG_PATH`: Your log path. For example, `/shared-volume/logs/*.log`. + - `DD_API_KEY`: Your [Datadog API key][1]. + - `DD_HEALTH_PORT`: The port you selected for the startup check in the previous step. + + For a list of all environment variables, including additional tags, see [Environment variables](#environment-variables). + +#### Main container + +1. Go to **Volume Mounts** and add the same shared volume as you did for the sidecar container. + **Note**: Save your changes by selecting **Done**. Do not deploy changes until the final step. +1. 
Go to **Variables & Secrets** and add the same [environment variables](#environment-variables) that you set for the sidecar container. Omit `DD_HEALTH_PORT`. +1. Go to **Settings**. In the **Container start up order** drop-down menu, select your sidecar. +1. Deploy your main application. + +[1]: https://app.datadoghq.com/organization-settings/api-keys + +{{% /tab %}} +{{% tab "YAML deploy" %}} +To deploy your Cloud Run service with a YAML service specification: + +1. Create a YAML file that contains the following: + + ```yaml + apiVersion: serving.knative.dev/v1 + kind: Service + metadata: + name: '' + labels: + cloud.googleapis.com/location: '' + spec: + template: + metadata: + annotations: + autoscaling.knative.dev/maxScale: '100' # The maximum number of instances that can be created for this service. https://cloud.google.com/run/docs/reference/rest/v1/RevisionTemplate + run.googleapis.com/container-dependencies: '{"run-sidecar-1":["serverless-init-1"]}' # Configure container start order for sidecar deployments https://cloud.google.com/run/docs/configuring/services/containers#container-ordering + run.googleapis.com/startup-cpu-boost: 'true' # The startup CPU boost feature for revisions provides additional CPU during instance startup time and for 10 seconds after the instance has started. https://cloud.google.com/run/docs/configuring/services/cpu#startup-boost + spec: + containers: + - env: + - name: DD_SERVERLESS_LOG_PATH + value: shared-volume/logs/*.log + - name: DD_SITE + value: '' + - name: DD_ENV + value: serverless + - name: DD_API_KEY + value: '' + - name: DD_SERVICE + value: '' + - name: DD_VERSION + value: '' + - name: DD_LOG_LEVEL + value: debug + - name: DD_LOGS_INJECTION + value: 'true' + image: '' + name: run-sidecar-1 + ports: + - containerPort: 8080 + name: http1 + resources: + limits: + cpu: 1000m + memory: 512Mi + startupProbe: + failureThreshold: 1 + periodSeconds: 240 + tcpSocket: + port: 8080 + timeoutSeconds: 240 + volumeMounts: + - mountPath: /shared-volume + name: shared-volume + - env: + - name: DD_SERVERLESS_LOG_PATH + value: shared-volume/logs/*.log + - name: DD_SITE + value: datadoghq.com + - name: DD_ENV + value: serverless + - name: DD_API_KEY + value: '' + - name: DD_SERVICE + value: '' + - name: DD_VERSION + value: '' + - name: DD_LOG_LEVEL + value: debug + - name: DD_LOGS_INJECTION + value: 'true' + - name: DD_HEALTH_PORT + value: '12345' + image: gcr.io/datadoghq/serverless-init:latest + name: serverless-init-1 + resources: + limits: + cpu: 1000m + memory: 512Mi # Can be updated to a higher memory if needed + startupProbe: + failureThreshold: 3 + periodSeconds: 10 + tcpSocket: + port: 12345 + timeoutSeconds: 1 + volumeMounts: + - mountPath: /shared-volume + name: shared-volume + volumes: + - emptyDir: + medium: Memory + sizeLimit: 512Mi + name: shared-volume + traffic: # make this revision and all future ones serve 100% of the traffic as soon as possible, overriding any established traffic split + - latestRevision: true + percent: 100 + ``` + In this example, the environment variables, startup health check, and volume mount are already added. If you don't want to enable logs, remove the shared volume. Ensure the container port for the main container is the same as the one exposed in your Dockerfile/service. +1. Supply placeholder values: + - ``: A name for your service. For example, `gcr-sidecar-test`. See [Unified Service Tagging][2]. + - ``: The region you are deploying your service in. For example, `us-central`. 
+ - ``: Your [Datadog site][3], {{< region-param key="dd_site" code="true" >}}. + - ``: Your [Datadog API key][1]. + - ``: The version number of your deployment. See [Unified Service Tagging][2]. + - ``: The image of the code you are deploying to Cloud Run. For example, `us-docker.pkg.dev/cloudrun/container/hello`. + - ``: The name of your Google Cloud service account. + +1. Run: + ```bash + gcloud run services replace .yaml + ``` + +[1]: https://app.datadoghq.com/organization-settings/api-keys +[2]: /getting_started/tagging/unified_service_tagging/ +[3]: /getting_started/site/ + +{{% /tab %}} +{{% tab "Terraform deploy" %}} +To deploy your Cloud Run service with Terraform, use the following example configuration file. In this example, the environment variables, startup health check, and volume mount are already added. If you don't want to enable logs, remove the shared volume. Ensure the container port for the main container is the same as the one exposed in your Dockerfile/service. If you do not want to allow public access, remove the IAM policy section. -## Configure your application +``` +provider "google" { + project = "" + region = "" # example: us-central1 +} + +resource "google_cloud_run_service" "terraform_with_sidecar" { + name = "" + location = "" + + template { + metadata { + annotations = { + # Correctly formatted container-dependencies annotation + "run.googleapis.com/container-dependencies" = jsonencode({main-app = ["sidecar-container"]}) + } + } + spec { + # Define shared volume + volumes { + name = "shared-volume" + empty_dir { + medium = "Memory" + } + } + + # Main application container + containers { + name = "main-app" + image = "" + + # Expose a port for the main container + ports { + container_port = 8080 + } + # Mount the shared volume + volume_mounts { + name = "shared-volume" + mount_path = "/shared-volume" + } + + # Startup Probe for TCP Health Check + startup_probe { + tcp_socket { + port = 8080 + } + initial_delay_seconds = 0 # Delay before the probe starts + period_seconds = 10 # Time between probes + failure_threshold = 3 # Number of failures before marking as unhealthy + timeout_seconds = 1 # Number of failures before marking as unhealthy + } + + # Environment variables for the main container + env { + name = "DD_SITE" + value = "" + } + env { + name = "DD_SERVERLESS_LOG_PATH" + value = "shared-volume/logs/*.log" + } + env { + name = "DD_ENV" + value = "serverless" + } + env { + name = "DD_API_KEY" + value = "" + } + env { + name = "DD_SERVICE" + value = "" + } + env { + name = "DD_VERSION" + value = "" + } + env { + name = "DD_LOG_LEVEL" + value = "debug" + } + env { + name = "DD_LOGS_INJECTION" + value = "true" + } + env { + name = "FUNCTION_TARGET" + value = "" # only needed for cloud run functions + } + + # Resource limits for the main container + resources { + limits = { + memory = "512Mi" + cpu = "1" + } + } + } + + # Sidecar container + containers { + name = "sidecar-container" + image = "gcr.io/datadoghq/serverless-init:latest" + + # Mount the shared volume + volume_mounts { + name = "shared-volume" + mount_path = "/shared-volume" + } + + # Startup Probe for TCP Health Check + startup_probe { + tcp_socket { + port = 12345 + } + initial_delay_seconds = 0 # Delay before the probe starts + period_seconds = 10 # Time between probes + failure_threshold = 3 # Number of failures before marking as unhealthy + timeout_seconds = 1 + } + + # Environment variables for the main container + env { + name = "DD_SITE" + value = "" + } + env { + name = 
"DD_SERVERLESS_LOG_PATH" + value = "shared-volume/logs/*.log" + } + env { + name = "DD_ENV" + value = "serverless" + } + env { + name = "DD_API_KEY" + value = "" + } + env { + name = "DD_SERVICE" + value = "" + } + env { + name = "DD_VERSION" + value = "" + } + env { + name = "DD_LOG_LEVEL" + value = "debug" + } + env { + name = "DD_LOGS_INJECTION" + value = "true" + } + env { + name = "FUNCTION_TARGET" + value = "" # only needed for cloud run functions + } + env { + name = "DD_HEALTH_PORT" + value = "12345" + } + + # Resource limits for the sidecar + resources { + limits = { + memory = "512Mi" + cpu = "1" + } + } + } + } + } + + # Define traffic splitting + traffic { + percent = 100 + latest_revision = true + } +} + +# IAM Member to allow public access (optional, adjust as needed) +resource "google_cloud_run_service_iam_member" "invoker" { + service = google_cloud_run_service.terraform_with_sidecar.name + location = google_cloud_run_service.terraform_with_sidecar.location + role = "roles/run.invoker" + member = "allUsers" +} +``` -Once the container is built and pushed to your registry, the last step is to set the required environment variables for the Datadog Agent: -- `DD_API_KEY`: Datadog API key, used to send data to your Datadog account. It should be configured as a [Google Cloud Secret][11] for privacy and safety issue. -- `DD_SITE`: Datadog endpoint and website. Select your site on the right side of this page. Your site is: {{< region-param key="dd_site" code="true" >}}. -- `DD_TRACE_ENABLED`: set to `true` to enable tracing -- `DD_TRACE_PROPAGATION_STYLE`: Set this to `datadog` to use context propagation and log trace correlation. +Supply placeholder values: +- ``: Your Google Cloud project ID. +- ``: The region you are deploying your service in. For example, `us-central1`. +- ``: A name for your service. For example, `gcr-sidecar-test`. See [Unified Service Tagging][2]. +- ``: The image of the code you are deploying to Cloud Run. +- ``: Your [Datadog site][3], {{< region-param key="dd_site" code="true" >}}. +- ``: Your [Datadog API key][1]. +- ``: The version number of your deployment. See [Unified Service Tagging][2]. + +[1]: https://app.datadoghq.com/organization-settings/api-keys +[2]: /getting_started/tagging/unified_service_tagging/ +[3]: /getting_started/site/ +{{% /tab %}} +{{< /tabs >}} + +## Environment variables + +| Variable | Description | +| -------- | ----------- | +|`DD_API_KEY`| [Datadog API key][4] - **Required**| +| `DD_SITE` | [Datadog site][5] - **Required** | +| `DD_LOGS_INJECTION`| When true, enrich all logs with trace data for supported loggers in [Java][6], [Node][7], [.NET][8], and [PHP][9]. See additional docs for [Python][10], [Go][11], and [Ruby][12]. | +| `DD_SERVICE` | See [Unified Service Tagging][13]. | +| `DD_VERSION` | See [Unified Service Tagging][13]. | +| `DD_ENV` | See [Unified Service Tagging][13]. | +| `DD_SOURCE` | See [Unified Service Tagging][13]. | +| `DD_TAGS` | See [Unified Service Tagging][13]. | + +Do not use the `DD_LOGS_ENABLED` environment variable. This variable is only used for the [serverless-init][14] install method. + +## Example application + +The following example contains a single app with tracing, metrics, and logs set up. 
+ +{{< tabs >}} +{{% tab "Node.js" %}} + +```js +const tracer = require('dd-trace').init({ + logInjection: true, +}); +const express = require("express"); +const app = express(); +const { createLogger, format, transports } = require('winston'); + +const logger = createLogger({ + level: 'info', + exitOnError: false, + format: format.json(), + transports: [new transports.File({ filename: `/shared-volume/logs/app.log`}), + ], +}); + +app.get("/", (_, res) => { + logger.info("Welcome!"); + res.sendStatus(200); +}); + +app.get("/hello", (_, res) => { + logger.info("Hello!"); + const metricPrefix = "nodejs-cloudrun"; + // Send three unique metrics, just so we're testing more than one single metric + const metricsToSend = ["sample_metric_1", "sample_metric_2", "sample_metric_3"]; + metricsToSend.forEach((metric) => { + for (let i = 0; i < 20; i++) { + tracer.dogstatsd.distribution(`${metricPrefix}.${metric}`, 1); + } + }); + res.status(200).json({ msg: "Sending metrics to Datadog" }); +}); + +const port = process.env.PORT || 8080; +app.listen(port); +``` -For more environment variables and their function, see [Additional Configurations](#additional-configurations). +{{% /tab %}} +{{% tab "Python" %}} + +### app.py + +```python +import ddtrace +from ddtrace import tracer +from flask import Flask, render_template, request +import logging +from datadog import initialize, statsd + +ddtrace.patch(logging=True) +app = Flask(__name__) +options = { + 'statsd_host':'127.0.0.1', + 'statsd_port':8125 +} +FORMAT = ('%(asctime)s %(levelname)s [%(name)s] [%(filename)s:%(lineno)d] ' + '[dd.service=%(dd.service)s dd.env=%(dd.env)s dd.version=%(dd.version)s dd.trace_id=%(dd.trace_id)s dd.span_id=%(dd.span_id)s] ' + '- %(message)s') +logging.basicConfig(level=logging.DEBUG, filename='app.log', format=FORMAT) +logger = logging.getLogger(__name__) +logger.level = logging.INFO + +ddlogs = [] + +@app.route('/', methods=["GET"]) +@tracer.wrap(service="dd_gcp_log_forwarder") +def index(): + log = request.args.get("log") + if log is not None: + with tracer.trace('sending_logs') as span: + statsd.increment('dd.gcp.logs.sent') + span.set_tag('logs', 'nina') + logger.info(log) + ddlogs.append(log) + return render_template("home.html", logs=ddlogs) + +if __name__ == '__main__': + tracer.configure(port=8126) + initialize(**options) + app.run(debug=True) +``` -The following command deploys the service and allows any external connection to reach it. Set `DD_API_KEY` as an environment variable, and set your service listening to port 8080. +### Home.html +```html
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <meta charset="utf-8">
+    <title>Datadog Test</title>
+</head>
+<body>
+    <h1>Welcome to Datadog!💜</h1>
+    <form action="/" method="GET">
+        <input name="log" type="text" placeholder="Enter a log message">
+        <button type="submit">Send to Datadog</button>
+    </form>
+    <h3>Logs Sent to Datadog:</h3>
+    <pre>
+    {% for log in logs %}
+      {% if log %}
+  • {{ log }}
+      {% endif %}
+    {% endfor %}
+    </pre>
+</body>
+</html>
+ + +``` +{{% /tab %}} +{{% tab "Java" %}} + +```java +package com.example.springboot; + +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.RestController; + +import com.timgroup.statsd.NonBlockingStatsDClientBuilder; +import com.timgroup.statsd.StatsDClient; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +@RestController +public class HelloController { + Private static final StatsDClient Statsd = new NonBlockingStatsDClientBuilder().hostname("localhost").build(); + private static final Log logger = LogFactory.getLog(HelloController.class); + @GetMapping("/") + public String index() { + Statsd.incrementCounter("page.views"); + logger.info("Hello Cloud Run!"); + return "💜 Hello Cloud Run! 💜"; + } +} +``` +{{% /tab %}} +{{% tab "Go" %}} +```go +package main + + +import ( + "fmt" + "log" + "net/http" + "os" + "path/filepath" + + + "github.com/DataDog/datadog-go/v5/statsd" + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace" + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" +) + + +const logDir = "/shared-volume/logs" + +var logFile *os.File +var logCounter int +var dogstatsdClient *statsd.Client + +func handler(w http.ResponseWriter, r *http.Request) { + log.Println("Yay!! Main container works") + span := tracer.StartSpan("maincontainer", tracer.ResourceName("/handler")) + defer span.Finish() + logCounter++ + writeLogsToFile(fmt.Sprintf("received request %d", logCounter), span.Context()) + dogstatsdClient.Incr("request.count", []string{"test-tag"}, 1) +} + +func writeLogsToFile(log_msg string, context ddtrace.SpanContext) { + span := tracer.StartSpan( + "writeLogToFile", + tracer.ResourceName("/writeLogsToFile"), + tracer.ChildOf(context)) + defer span.Finish() + _, err := logFile.WriteString(log_msg + "\n") + if err != nil { + log.Println("Error writing to log file:", err) + } +} + +func main() { + log.Print("Main container started...") + + err := os.MkdirAll(logDir, 0755) + if err != nil { + panic(err) + } + logFilePath := filepath.Join(logDir, "maincontainer.log") + log.Println("Saving logs in ", logFilePath) + logFileLocal, err := os.OpenFile(logFilePath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644) + if err != nil { + panic(err) + } + defer logFileLocal.Close() + + logFile = logFileLocal + + dogstatsdClient, err = statsd.New("localhost:8125") + if err != nil { + panic(err) + } + defer dogstatsdClient.Close() + + tracer.Start() + defer tracer.Stop() + + http.HandleFunc("/", handler) + log.Fatal(http.ListenAndServe(":8080", nil)) +} ``` -shell -gcloud run deploy APP_NAME --image=gcr.io/YOUR_PROJECT/APP_NAME \ - --port=8080 \ - --update-env-vars=DD_API_KEY=$DD_API_KEY \ - --update-env-vars=DD_TRACE_ENABLED=true \ - --update-env-vars=DD_SITE='datadoghq.com' \ - --update-env-vars=DD_TRACE_PROPAGATION_STYLE='datadog' \ +{{% /tab %}} +{{% tab ".NET" %}} +```csharp +using Microsoft.AspNetCore.Mvc; +using Microsoft.AspNetCore.Mvc.RazorPages; +using Serilog; +using Serilog.Formatting.Json; +using Serilog.Formatting.Compact; +using Serilog.Sinks.File; +using StatsdClient; + + +namespace dotnet.Pages; + + +public class IndexModel : PageModel +{ + private readonly static DogStatsdService _dsd; + static IndexModel() + { + var dogstatsdConfig = new StatsdConfig + { + StatsdServerName = "127.0.0.1", + StatsdPort = 8125, + }; + + + _dsd = new DogStatsdService(); + _dsd.Configure(dogstatsdConfig); + + + Log.Logger = new LoggerConfiguration() + .WriteTo.File(new RenderedCompactJsonFormatter(), 
"/shared-volume/logs/app.log") + .CreateLogger(); + } + public void OnGet() + { + _dsd.Increment("page.views"); + Log.Information("Hello Cloud Run!"); + } +} ``` +{{% /tab %}} +{{% tab "PHP" %}} -## Results +```php +Serverless** to see your serverless metrics and traces. -## Additional configurations +require __DIR__ . '/vendor/autoload.php'; -- **Advanced Tracing:** The Datadog Agent already provides some basic tracing for popular frameworks. Follow the [advanced tracing guide][2] for more information. -- **Logs:** If you use the [Google Cloud integration][1], your logs are already being collected. Alternatively, you can set the `DD_LOGS_ENABLED` environment variable to `true` to capture application logs through the serverless instrumentation directly. +use DataDog\DogStatsd; +use Monolog\Logger; +use Monolog\Handler\StreamHandler; +use Monolog\Formatter\JsonFormatter; -- **Custom Metrics:** You can submit custom metrics using a [DogStatsd client][4]. For monitoring Cloud Run and other serverless applications, use [distribution][9] metrics. Distributions provide `avg`, `sum`, `max`, `min`, and `count` aggregations by default. On the Metric Summary page, you can enable percentile aggregations (p50, p75, p90, p95, p99) and also manage tags. To monitor a distribution for a gauge metric type, use `avg` for both the [time and space aggregations][11]. To monitor a distribution for a count metric type, use `sum` for both the time and space aggregations. -### Environment Variables +$statsd = new DogStatsd( + array('host' => '127.0.0.1', + 'port' => 8125, + ) + ); -| Variable | Description | -| ---------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `DD_API_KEY` | [Datadog API Key][7] - **Required** | -| `DD_SITE` | [Datadog site][5] - **Required** | -| `DD_LOGS_ENABLED` | When true, send logs (stdout and stderr) to Datadog. Defaults to false. | -| `DD_LOGS_INJECTION` | When true, enrich all logs with trace data for supported loggers in [Java][19], [Node][20], [.NET][21], and [PHP][22]. See additional docs for [Python][23], [Go][24], and [Ruby][25]. | -| `DD_TRACE_SAMPLE_RATE` | Controls the trace ingestion sample rate `0.0` and `1.0`. | -| `DD_SERVICE` | See [Unified Service Tagging][6]. | -| `DD_VERSION` | See [Unified Service Tagging][6]. | -| `DD_ENV` | See [Unified Service Tagging][6]. | -| `DD_SOURCE` | See [Unified Service Tagging][6]. | -| `DD_TAGS` | See [Unified Service Tagging][6]. | -## Troubleshooting +$log = new logger('datadog'); +$formatter = new JsonFormatter(); -This integration depends on your runtime having a full SSL implementation. If you are using a slim image, you may need to add the following command to your Dockerfile to include certificates. -``` -RUN apt-get update && apt-get install -y ca-certificates +$stream = new StreamHandler('/shared-volume/logs/app.log', Logger::DEBUG); +$stream->setFormatter($formatter); + + +$log->pushHandler($stream); + + +$log->info("Hello Datadog!"); +echo '💜 Hello Datadog! 
💜'; + + +$log->info("sending a metric"); +$statsd->increment('page.views', 1, array('environment'=>'dev')); + + +?> ``` +{{% /tab %}} +{{< /tabs >}} ## Further reading @@ -166,27 +866,16 @@ RUN apt-get update && apt-get install -y ca-certificates [1]: /integrations/google_cloud_platform/#log-collection -[2]: /tracing/trace_collection/#for-setup-instructions-select-your-language -[3]: https://buildpacks.io/docs/tools/pack/ -[4]: /metrics/custom_metrics/dogstatsd_metrics_submission/ +[2]: https://app.datadoghq.com/organization-settings/api-keys +[3]: https://hub.docker.com/r/datadog/serverless-init +[4]: /account_management/api-app-keys/#api-keys [5]: /getting_started/site/ -[6]: /getting_started/tagging/unified_service_tagging/ -[7]: /account_management/api-app-keys/#api-keys -[8]: https://github.com/DataDog/crpb/tree/main -[9]: /metrics/distributions/ -[10]: /metrics/#time-and-space-aggregation -[11]: https://cloud.google.com/run/docs/configuring/secrets -[12]: /tracing/trace_collection/library_config/go/ -[13]: /tracing/trace_collection/dd_libraries/python/?tab=containers#instrument-your-application -[14]: /tracing/trace_collection/dd_libraries/nodejs/?tab=containers#instrument-your-application -[15]: /tracing/trace_collection/dd_libraries/java/?tab=containers#instrument-your-application -[16]: /tracing/trace_collection/dd_libraries/ruby/?tab=containers#instrument-your-application -[17]: /tracing/trace_collection/dd_libraries/php/?tab=containers#install-the-extension -[18]: /tracing/trace_collection/dd_libraries/dotnet-core/?tab=linux#custom-instrumentation -[19]: /tracing/other_telemetry/connect_logs_and_traces/java/?tab=log4j2 -[20]: /tracing/other_telemetry/connect_logs_and_traces/nodejs -[21]: /tracing/other_telemetry/connect_logs_and_traces/dotnet?tab=serilog -[22]: /tracing/other_telemetry/connect_logs_and_traces/php -[23]: /tracing/other_telemetry/connect_logs_and_traces/python -[24]: /tracing/other_telemetry/connect_logs_and_traces/go -[25]: /tracing/other_telemetry/connect_logs_and_traces/ruby +[6]: /tracing/other_telemetry/connect_logs_and_traces/java/?tab=log4j2 +[7]: /tracing/other_telemetry/connect_logs_and_traces/nodejs +[8]: /tracing/other_telemetry/connect_logs_and_traces/dotnet?tab=serilog +[9]: /tracing/other_telemetry/connect_logs_and_traces/php +[10]: /tracing/other_telemetry/connect_logs_and_traces/python +[11]: /tracing/other_telemetry/connect_logs_and_traces/go +[12]: /tracing/other_telemetry/connect_logs_and_traces/ruby +[13]: /getting_started/tagging/unified_service_tagging/ +[14]: /serverless/guide/gcr_serverless_init \ No newline at end of file diff --git a/content/en/serverless/guide/datadog_forwarder_go.md b/content/en/serverless/guide/datadog_forwarder_go.md index a5d456f7490ae..991365ff1863f 100644 --- a/content/en/serverless/guide/datadog_forwarder_go.md +++ b/content/en/serverless/guide/datadog_forwarder_go.md @@ -2,6 +2,7 @@ title: Instrumenting Go Serverless Applications Using the Datadog Forwarder --- + ## Overview
@@ -40,8 +41,10 @@ Follow these steps to instrument the function: import ( "github.com/aws/aws-lambda-go/lambda" "github.com/DataDog/datadog-lambda-go" - "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" - httptrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http" + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" // 1.x + // "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" // 2.x + httptrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http" // 1.x + // httptrace "github.com/DataDog/dd-trace-go/contrib/net/http/v2" // 2.x ) ``` 3. Wrap your Lambda function handler using the wrapper provided by the Datadog Lambda library. diff --git a/content/en/serverless/guide/gcr_serverless_init.md b/content/en/serverless/guide/gcr_serverless_init.md new file mode 100644 index 0000000000000..f4928e59cd4e6 --- /dev/null +++ b/content/en/serverless/guide/gcr_serverless_init.md @@ -0,0 +1,192 @@ +--- +title: Google Cloud Run with serverless-init +further_reading: + +- link: 'https://www.datadoghq.com/blog/collect-traces-logs-from-cloud-run-with-datadog/' + tag: 'Blog' + text: 'Collect traces, logs, and custom metrics from Cloud Run services' + +--- + +## Overview + +
To instrument your Google Cloud Run applications with a sidecar, see Instrument Google Cloud Run.
+ +Google Cloud Run is a fully managed serverless platform for deploying and scaling container-based applications. Datadog provides monitoring and log collection for Cloud Run through the [Google Cloud integration][1]. Datadog also provides a solution for instrumenting your Cloud Run applications with a purpose-built Agent to enable tracing, custom metrics, and direct log collection. + +### Prerequisites + +Make sure you have a [Datadog API key][7] and are using a programming language [supported by a Datadog tracing library][2]. + +## Instrument your application + +You can instrument your application in one of two ways: [Dockerfile](#dockerfile) or [buildpack](#buildpack). + +### Dockerfile + +Datadog publishes new releases of the `serverless-init` container image to Google's gcr.io, AWS ECR, and Docker Hub: + +| docker.io | gcr.io | public.ecr.aws | +| ------------ | ------ | -------------- | +| datadog/serverless-init | gcr.io/datadoghq/serverless-init | public.ecr.aws/datadog/serverless-init | + +Images are tagged based on semantic versioning, with each new version receiving three relevant tags: + +* `1`, `1-alpine`: use these to track the latest minor releases, without breaking changes +* `1.x.x`, `1.x.x-alpine`: use these to pin to a precise version of the library +* `latest`, `latest-alpine`: use these to follow the latest version release, which may include breaking changes + +## How `serverless-init` works + +The `serverless-init` application wraps your process and executes it as a subprocess. It starts a DogStatsD listener for metrics and a Trace Agent listener for traces, and it collects logs by wrapping your application's stdout/stderr streams. After these listeners are bootstrapped, `serverless-init` launches your command as a subprocess. + +To get full instrumentation, ensure you are calling `datadog-init` as the first command that runs inside your Docker container. You can do this by setting it as the entrypoint, or as the first argument in CMD. + +{{< programming-lang-wrapper langs="nodejs,python,java,go,dotnet,ruby,php" >}} +{{< programming-lang lang="nodejs" >}} + +{{% svl-init-nodejs %}} + +{{< /programming-lang >}} +{{< programming-lang lang="python" >}} + +{{% svl-init-python %}} + +{{< /programming-lang >}} +{{< programming-lang lang="java" >}} + +{{% svl-init-java %}} + +{{< /programming-lang >}} +{{< programming-lang lang="go" >}} + +{{% svl-init-go %}} + +{{< /programming-lang >}} +{{< programming-lang lang="dotnet" >}} + +{{% svl-init-dotnet %}} + +{{< /programming-lang >}} +{{< programming-lang lang="ruby" >}} + +{{% svl-init-ruby %}} + +{{< /programming-lang >}} +{{< programming-lang lang="php" >}} + +{{% svl-init-php %}} + +{{< /programming-lang >}} +{{< /programming-lang-wrapper >}} + +### Buildpack + +[`Pack Buildpacks`][3] provide a convenient way to package your container without using a Dockerfile. + +First, manually install your tracer: +- [Node.js][14] +- [Python][13] +- [Java][15] +- [Go][12] +- [.NET][18] +- [Ruby][16] +- [PHP][17] + +Then, build your application by running the following command: + +```shell +pack build --builder=gcr.io/buildpacks/builder \ +--buildpack from=builder \ +--buildpack datadog/serverless-buildpack:latest \ +gcr.io/<YOUR_PROJECT>/<APP_NAME> +``` + +**Note**: Buildpack instrumentation is not compatible with Alpine images.
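As a concrete illustration of the entrypoint wrapping described in the [How `serverless-init` works](#how-serverless-init-works) section, the following is a minimal Dockerfile sketch. It assumes a Node.js application started with `node app.js`; the base image, copy path, and start command are illustrative, so follow the language tab above that matches your runtime for the exact steps.

```dockerfile
# Illustrative base image and dependencies; replace with your own
FROM node:20-slim
WORKDIR /app
COPY . .
RUN npm install --omit=dev

# Copy the serverless-init binary into the image
COPY --from=datadog/serverless-init:1 /datadog-init /app/datadog-init

# Run datadog-init first so it can wrap the application as a subprocess
ENTRYPOINT ["/app/datadog-init"]
CMD ["node", "app.js"]
```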
+ +## Configure your application + +Once the container is built and pushed to your registry, the last step is to set the required environment variables for the Datadog Agent: +- `DD_API_KEY`: Datadog API key, used to send data to your Datadog account. It should be configured as a [Google Cloud Secret][11] for privacy and safety. +- `DD_SITE`: Datadog endpoint and website. Select your site on the right side of this page. Your site is: {{< region-param key="dd_site" code="true" >}}. + +For more environment variables and their function, see [Environment Variables](#environment-variables). + +The following command deploys the service and allows any external connection to reach it. In this example, the service listens on port 8080. Ensure that this port number matches the port exposed in your Dockerfile. + +```shell +gcloud run deploy <APP_NAME> --image=gcr.io/<YOUR_PROJECT>/<APP_NAME> \ + --port=8080 \ + --update-env-vars=DD_API_KEY=$DD_API_KEY \ + --update-env-vars=DD_SITE=$DD_SITE +``` + +See [all arguments and flags for `gcloud run deploy`][26]. + +## Results + +Once the deployment is complete, your metrics and traces are sent to Datadog. In Datadog, navigate to **Infrastructure > Serverless** to see your serverless metrics and traces. + +## Additional configurations + +- **Advanced Tracing:** The Datadog Agent already provides some basic tracing for popular frameworks. Follow the [advanced tracing guide][2] for more information. + +- **Logs:** If you use the [Google Cloud integration][1], your logs are already being collected. Alternatively, you can set the `DD_LOGS_ENABLED` environment variable to `true` to capture application logs through the serverless instrumentation directly. + +- **Custom Metrics:** You can submit custom metrics using a [DogStatsD client][4]. For monitoring Cloud Run and other serverless applications, use [distribution][9] metrics. Distributions provide `avg`, `sum`, `max`, `min`, and `count` aggregations by default. On the Metric Summary page, you can enable percentile aggregations (p50, p75, p90, p95, p99) and also manage tags. To monitor a distribution for a gauge metric type, use `avg` for both the [time and space aggregations][10]. To monitor a distribution for a count metric type, use `sum` for both the time and space aggregations. + +### Environment Variables + +| Variable | Description | +| -------- | ----------- | +| `DD_API_KEY` | [Datadog API key][7] - **Required** | +| `DD_SITE` | [Datadog site][5] - **Required** | +| `DD_LOGS_ENABLED` | When true, send logs (stdout and stderr) to Datadog. Defaults to false. | +| `DD_LOGS_INJECTION`| When true, enrich all logs with trace data for supported loggers in [Java][19], [Node][20], [.NET][21], and [PHP][22]. See additional docs for [Python][23], [Go][24], and [Ruby][25]. | +| `DD_SERVICE` | See [Unified Service Tagging][6]. | +| `DD_VERSION` | See [Unified Service Tagging][6]. | +| `DD_ENV` | See [Unified Service Tagging][6]. | +| `DD_SOURCE` | See [Unified Service Tagging][6]. | +| `DD_TAGS` | See [Unified Service Tagging][6]. | + +## Troubleshooting + +This integration depends on your runtime having a full SSL implementation. If you are using a slim image, you may need to add the following command to your Dockerfile to include certificates.
+ +``` +RUN apt-get update && apt-get install -y ca-certificates +``` + + +## Further reading + +{{< partial name="whats-next/whats-next.html" >}} + + +[1]: /integrations/google_cloud_platform/#log-collection +[2]: /tracing/trace_collection/#for-setup-instructions-select-your-language +[3]: https://buildpacks.io/docs/tools/pack/ +[4]: /metrics/custom_metrics/dogstatsd_metrics_submission/ +[5]: /getting_started/site/ +[6]: /getting_started/tagging/unified_service_tagging/ +[7]: /account_management/api-app-keys/#api-keys +[8]: https://github.com/DataDog/crpb/tree/main +[9]: /metrics/distributions/ +[10]: /metrics/#time-and-space-aggregation +[11]: https://cloud.google.com/run/docs/configuring/secrets +[12]: /tracing/trace_collection/library_config/go/ +[13]: /tracing/trace_collection/dd_libraries/python/?tab=containers#instrument-your-application +[14]: /tracing/trace_collection/dd_libraries/nodejs/?tab=containers#instrument-your-application +[15]: /tracing/trace_collection/dd_libraries/java/?tab=containers#instrument-your-application +[16]: /tracing/trace_collection/dd_libraries/ruby/?tab=containers#instrument-your-application +[17]: /tracing/trace_collection/dd_libraries/php/?tab=containers#install-the-extension +[18]: /tracing/trace_collection/dd_libraries/dotnet-core/?tab=linux#custom-instrumentation +[19]: /tracing/other_telemetry/connect_logs_and_traces/java/?tab=log4j2 +[20]: /tracing/other_telemetry/connect_logs_and_traces/nodejs +[21]: /tracing/other_telemetry/connect_logs_and_traces/dotnet?tab=serilog +[22]: /tracing/other_telemetry/connect_logs_and_traces/php +[23]: /tracing/other_telemetry/connect_logs_and_traces/python +[24]: /tracing/other_telemetry/connect_logs_and_traces/go +[25]: /tracing/other_telemetry/connect_logs_and_traces/ruby +[26]: https://cloud.google.com/sdk/gcloud/reference/run/deploy \ No newline at end of file diff --git a/content/en/service_catalog/_index.md b/content/en/service_catalog/_index.md index 71f085d3c24d2..06b42b9e7aa91 100644 --- a/content/en/service_catalog/_index.md +++ b/content/en/service_catalog/_index.md @@ -36,6 +36,9 @@ further_reading: - link: "https://www.datadoghq.com/blog/service-catalog-schema-v3/" tag: "Blog" text: "Improve developer experience and collaboration with Service Catalog schema version 3.0" +- link: "https://www.datadoghq.com/blog/memory-leak-workflow/" + tag: "Blog" + text: "Investigate memory leaks and OOMs with Datadog's guided workflow" algolia: tags: ['service catalog'] --- diff --git a/content/en/service_catalog/customize/_index.md b/content/en/service_catalog/customize/_index.md index 2c7bc4a7c448c..dbd2d298f0f49 100644 --- a/content/en/service_catalog/customize/_index.md +++ b/content/en/service_catalog/customize/_index.md @@ -141,7 +141,7 @@ With [Service Catalog metadata schema 2.2][5], you can specify the type and lang [3]: /service_catalog/service_definitions/v3-0/ [4]: https://forms.gle/zbLfnJYhD5Ab4Wr18 [5]: https://github.com/DataDog/schema/tree/main/service-catalog/v2.2 -[6]: /service_management/workflows/actions_catalog/ +[6]: /actions/actions_catalog/ [7]: /tracing/guide/inferred-service-opt-in/?tab=java#global-default-service-naming-migration [8]: /tracing/guide/service_overrides/#remove-service-overrides [9]: /tracing/guide/service_overrides/ diff --git a/content/en/service_catalog/navigating.md b/content/en/service_catalog/navigating.md index 41f62786c2cc1..acb8ad27aa4bf 100644 --- a/content/en/service_catalog/navigating.md +++ b/content/en/service_catalog/navigating.md @@ -133,5 +133,4 @@ To access 
additional details describing your CI status and static analysis viola [11]: /cloud_cost_management/tag_pipelines [12]: https://app.datadoghq.com/ci/pipelines [13]: https://app.datadoghq.com/ci/static-analysis -[15]: /service_management/workflows/actions_catalog/ [16]: /dora_metrics/setup diff --git a/content/en/service_catalog/scorecards/custom_rules.md b/content/en/service_catalog/scorecards/custom_rules.md index f043b250fd94d..8855befb991ea 100644 --- a/content/en/service_catalog/scorecards/custom_rules.md +++ b/content/en/service_catalog/scorecards/custom_rules.md @@ -47,6 +47,25 @@ To evaluate and add custom rules in the Scorecards UI: {{< img src="/tracing/service_catalog/scorecard-create-and-update-rule-ui.mp4" alt="User creating and evaluating a custom rule in the Scorecards UI" video="true" style="width:90%;" >}} +## Evaluate custom rules using Workflow Automation + +Workflow Automation allows you to automate the evaluation of your custom rules in Datadog using the [**Update scorecard rule outcome** action][3]. To set up a custom rule evaluation, create a Workflow from scratch or use one of the [Scorecards blueprints][4]. + +{{< img src="/tracing/service_catalog/scorecards_workflow_example.png" alt="Workflow evaluating whether a service has a tier defined in Service Catalog" style="width:90%;" >}} + +To set up a custom rule evaluation using Workflow Automation: + +1. Create a custom rule in Scorecards. +2. [Create a Workflow][5]. +3. Set a schedule for your Workflow to run on. +4. Click plus (+) icon to add a step. +5. Use the [**List service definitions** action][6] to fetch all defined services from Service Catalog. +6. Insert a [For loop][7] to iterate over each service one-by-one. +7. Select the action needed to fetch your evaluation data. +8. Transform the returned data using a custom JavaScript function to generate pass/fail outcomes for each service. +9. Use the [**Update scorecard rule outcome** action][3] to send results to Scorecards. +10. Run the Workflow and see your evaluations populate in Scorecards for your custom rule. 
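The JavaScript transform in step 8 depends on the shape of the data your previous actions return, so the following is only a sketch. The field names (`schema`, `tier`, `dd-service`) and the outcome format are assumptions; adapt them to your own payload and rule.

```js
// Hypothetical transform: takes the array returned by the
// "List service definitions" step and produces one pass/fail outcome
// per service, based on whether a tier is defined.
function buildOutcomes(serviceDefinitions) {
  return serviceDefinitions.map((definition) => {
    const schema = definition.schema || {};
    const hasTier = Boolean(schema.tier);
    return {
      serviceName: schema["dd-service"],
      outcome: hasTier ? "pass" : "fail",
      remarks: hasTier ? "" : "No tier defined in Service Catalog",
    };
  });
}
```

Each outcome object can then be sent to Scorecards with the **Update scorecard rule outcome** action in step 9.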
+ ## Further reading {{< partial name="whats-next/whats-next.html" >}} @@ -54,5 +73,8 @@ To evaluate and add custom rules in the Scorecards UI: [1]: /service_catalog/scorecards/scorecard_configuration/ [2]: /api/latest/service-scorecards/ - - +[3]: https://app.datadoghq.com/workflow/action-catalog#com.datadoghq.dd/com.datadoghq.dd.service_catalog/com.datadoghq.dd.service_catalog.updateScorecardRuleOutcome +[4]: https://app.datadoghq.com/workflow/blueprints?selected_category=SCORECARDS +[5]: /service_management/workflows/build/ +[6]: https://app.datadoghq.com/workflow/action-catalog#com.datadoghq.dd/com.datadoghq.dd.service_catalog/com.datadoghq.dd.service_catalog.listServiceDefinitions +[7]: https://app.datadoghq.com/workflow/action-catalog#//com.datadoghq.core.forLoop diff --git a/content/en/service_catalog/software_templates.md b/content/en/service_catalog/software_templates.md index d96e2b94ff16e..bc683520d2cf0 100644 --- a/content/en/service_catalog/software_templates.md +++ b/content/en/service_catalog/software_templates.md @@ -100,9 +100,8 @@ Below is a comprehensive list of actions available for Service Catalog in Datado [4]: https://www.cookiecutter.io/ [5]: https://gist.github.com/enbashi/366c62ee8c5fc350d52ddabc867602d4#file-readme-md [6]: /service_management/workflows/build/#create-a-custom-workflow -[7]: /service_management/workflows/actions_catalog/ -[8]: /service_management/workflows/actions_catalog/aws_lambda_invoke_lambda/ +[7]: /actions/actions_catalog/ [9]: https://app.datadoghq.com/app-builder/blueprints [10]: https://app.datadoghq.com/app-builder/apps/edit?activeTab=queries&showActionCatalog=false&template=create-new-s3-bucket&viewMode=edit [11]: https://app.datadoghq.com/app-builder/apps/edit?activeTab=queries&showActionCatalog=false&template=scaffolding&viewMode=edit -[12]: https://docs.datadoghq.com/service_management/workflows/private_actions/ +[12]: /actions/private_actions/ diff --git a/content/en/service_management/app_builder/_index.md b/content/en/service_management/app_builder/_index.md index 8a1cb759adbc9..a23dcaac5e3f7 100644 --- a/content/en/service_management/app_builder/_index.md +++ b/content/en/service_management/app_builder/_index.md @@ -2,7 +2,7 @@ title: App Builder disable_toc: false further_reading: -- link: "/service_management/workflows/actions_catalog/" +- link: "/actions/actions_catalog/" tag: "Documentation" text: "Actions Catalog" - link: "https://www.datadoghq.com/blog/datadog-app-builder-low-code-internal-tools/" diff --git a/content/en/service_management/app_builder/build.md b/content/en/service_management/app_builder/build.md index 816f84e7f120c..846fbeb78a38e 100644 --- a/content/en/service_management/app_builder/build.md +++ b/content/en/service_management/app_builder/build.md @@ -4,7 +4,7 @@ aliases: - /app_builder/build disable_toc: false further_reading: -- link: "/service_management/workflows/actions_catalog/" +- link: "/actions/actions_catalog/" tag: "Documentation" text: "Actions Catalog" --- @@ -76,6 +76,15 @@ UI components can trigger reactions on an [Event][11]. You can use [JavaScript Expressions][13] anywhere in App Builder to create custom interactions between the different parts of your app. 
+## Favorite an app + +To favorite an app and pin it to the top of your list of apps, click the star next to the name of the app in the [app list][14]: + +{{< img src="service_management/app_builder/app-list-star.png" alt="An app list with four apps, none of which are starred" style="width:40%;" >}} + +When you refresh the page, the starred app appears in a section at the top of your list of apps: + +{{< img src="service_management/app_builder/app-list-with-favorited-app.png" alt="An app list with four apps, one of which is starred and pinned to the top of the list" style="width:40%;" >}} ## View app version history @@ -130,4 +139,5 @@ The **Switch to GUI** option in the settings menu takes you back to the GUI edit [10]: https://app.datadoghq.com/app-builder/action-catalog [11]: /service_management/app_builder/events [12]: /service_management/app_builder/queries -[13]: /service_management/app_builder/expressions \ No newline at end of file +[13]: /service_management/app_builder/expressions +[14]: https://app.datadoghq.com/app-builder/apps/list \ No newline at end of file diff --git a/content/en/service_management/app_builder/connections/http_request.md b/content/en/service_management/app_builder/connections/http_request.md index 27c65d420d506..53905c9c775f5 100644 --- a/content/en/service_management/app_builder/connections/http_request.md +++ b/content/en/service_management/app_builder/connections/http_request.md @@ -181,5 +181,5 @@ To configure a private HTTP request: [2]: /service_management/app_builder/auth/ [3]: https://learn.microsoft.com/en-us/azure/active-directory/develop/scopes-oidc#the-default-scope [4]: https://datadoghq.slack.com/ -[5]: /service_management/app_builder/private_actions -[6]: /service_management/app_builder/private_actions/private_action_credentials/?tab=httpsaction#credential-files +[5]: /actions/private_actions +[6]: /actions/private_actions/private_action_credentials/?tab=httpsaction#credential-files diff --git a/content/en/service_management/app_builder/private_actions/_index.md b/content/en/service_management/app_builder/private_actions/_index.md deleted file mode 100644 index 9b6aba9b0c3cd..0000000000000 --- a/content/en/service_management/app_builder/private_actions/_index.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -title: Private Actions Overview -disable_toc: false ---- - -{{< include-markdown "service_management/workflows/private_actions" >}} - diff --git a/content/en/service_management/app_builder/private_actions/private_action_credentials.md b/content/en/service_management/app_builder/private_actions/private_action_credentials.md deleted file mode 100644 index 0ab0cb0557f02..0000000000000 --- a/content/en/service_management/app_builder/private_actions/private_action_credentials.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -title: Handling Private Action Credentials - -disable_toc: false ---- - -{{< include-markdown "service_management/workflows/private_actions/private_action_credentials" >}} diff --git a/content/en/service_management/app_builder/private_actions/use_private_actions.md b/content/en/service_management/app_builder/private_actions/use_private_actions.md deleted file mode 100644 index 01ac5f165faf8..0000000000000 --- a/content/en/service_management/app_builder/private_actions/use_private_actions.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: Use Private Actions -disable_toc: false ---- - -{{< include-markdown "service_management/workflows/private_actions/use_private_actions" >}} \ No newline at end of file diff --git 
a/content/en/service_management/events/_index.md b/content/en/service_management/events/_index.md index 06a8d0b82468d..ca300e5e92451 100644 --- a/content/en/service_management/events/_index.md +++ b/content/en/service_management/events/_index.md @@ -39,6 +39,8 @@ More than 100 Datadog integrations support events collection, including [Kuberne **Tip**: To open the Event Management page from Datadog's global search, press Cmd/Ctrl + K and search for `event explorer`. +**Update to Datadog monitor events `aggregation_key` starting March 1, 2025:** The Datadog monitor events `aggregation_key` is unique to each Monitor ID. Starting March 1st, this key will also include Monitor Group, making it unique per *Monitor ID and Monitor Group*. If you're using monitor events `aggregation_key` in dashboard queries or the Event API, you must migrate to use `@monitor.id`. Reach out to [support][10] if you have any question. + ## Components {{< whatsnext desc="Event Management features:">}} @@ -64,3 +66,4 @@ More than 100 Datadog integrations support events collection, including [Kuberne [7]: /integrations/amazon_auto_scaling/#events [8]: /integrations/sentry/ [9]: /integrations/nagios/#events +[10]: /help/ \ No newline at end of file diff --git a/content/en/service_management/workflows/_index.md b/content/en/service_management/workflows/_index.md index f385354e9a5ee..db63d5c564606 100644 --- a/content/en/service_management/workflows/_index.md +++ b/content/en/service_management/workflows/_index.md @@ -66,7 +66,7 @@ Below are a few examples of workflows you can build:
Do you have questions or feedback? Join the **#workflows** channel on the [Datadog Community Slack][4]. -[1]: /service_management/workflows/actions_catalog/ +[1]: /actions/actions_catalog/ [2]: /workflows/build/#build-a-workflow-from-a-blueprint [3]: https://app.datadoghq.com/dashboard/lists [4]: https://datadoghq.slack.com/ diff --git a/content/en/service_management/workflows/access.md b/content/en/service_management/workflows/access.md index c888661682a5c..1d17f0a33bfb0 100644 --- a/content/en/service_management/workflows/access.md +++ b/content/en/service_management/workflows/access.md @@ -14,7 +14,7 @@ further_reading: - link: "/integrations/" tag: "Documentation" text: "Learn about integrations" -- link: "/service_management/workflows/actions_catalog" +- link: "/actions/actions_catalog" tag: "Documentation" text: "See the list of workflow actions" --- @@ -148,7 +148,7 @@ You can restrict access on a specific workflow either from the workflow list pag [2]: /account_management/org_settings/service_accounts/ [3]: /account_management/rbac/ [4]: /service_management/workflows/trigger/ -[5]: /service_management/workflows/actions_catalog/ +[5]: /actions/actions_catalog/ [6]: /service_management/workflows/connections/ [7]: /account_management/rbac/permissions/#workflow-automation [8]: https://app.datadoghq.com/workflow diff --git a/content/en/service_management/workflows/actions/http.md b/content/en/service_management/workflows/actions/http.md index 77169d02988af..01a7c250d109e 100644 --- a/content/en/service_management/workflows/actions/http.md +++ b/content/en/service_management/workflows/actions/http.md @@ -174,5 +174,5 @@ To configure a private HTTP request: [2]: /service_management/workflows/access/ [3]: https://learn.microsoft.com/en-us/azure/active-directory/develop/scopes-oidc#the-default-scope [4]: https://datadoghq.slack.com/ -[5]: /service_management/workflows/private_actions -[6]: /service_management/workflows/private_actions/private_action_credentials/?tab=httpsaction#credential-files +[5]: /actions/private_actions +[6]: /actions/private_actions/private_action_credentials/?tab=httpsaction#credential-files diff --git a/content/en/service_management/workflows/build.md b/content/en/service_management/workflows/build.md index 574e6119df763..6e4ffeb4b74be 100644 --- a/content/en/service_management/workflows/build.md +++ b/content/en/service_management/workflows/build.md @@ -9,7 +9,7 @@ further_reading: - link: "/getting_started/workflow_automation/" tag: "Documentation" text: "Getting Started with Workflow Automation" -- link: "/service_management/workflows/actions_catalog" +- link: "/actions/actions_catalog" tag: "Documentation" text: "Browse the available actions in the Actions Catalog" - link: "/security/cloud_security_management/workflows" @@ -181,7 +181,7 @@ To perform tasks using the API, see the [Workflow Automation API documentation][ [3]: /service_management/workflows/trigger [4]: https://www.datadoghq.com/pricing/?product=workflow-automation#products [5]: https://app.datadoghq.com/workflow/blueprints -[6]: /service_management/workflows/actions_catalog/generic_actions/#testing-expressions-and-functions +[6]: /service_management/workflows/actions/#testing-expressions-and-functions [7]: /getting_started/tagging/ [8]: /glossary/#service [9]: /account_management/teams/ diff --git a/content/en/service_management/workflows/connections.md b/content/en/service_management/workflows/connections.md index c78fa7f228223..0cb80964ece1c 100644 --- 
a/content/en/service_management/workflows/connections.md +++ b/content/en/service_management/workflows/connections.md @@ -180,12 +180,11 @@ To delete a connection group:
Do you have questions or feedback? Join the **#workflows** channel on the [Datadog Community Slack][11]. -[1]: /service_management/workflows/actions_catalog/generic_actions/ [2]: https://app.datadoghq.com/workflow [3]: https://app.datadoghq.com/workflow/connections [4]: /service_management/workflows/access/#restrict-connection-use [6]: /integrations/ -[8]: /service_management/workflows/actions_catalog/generic_actions/ +[8]: /service_management/workflows/actions/ [9]: https://app.datadoghq.com/workflow [10]: /service_management/workflows/actions/http/ [11]: https://datadoghq.slack.com/ diff --git a/content/en/tracing/faq/trace_sampling_and_storage.md b/content/en/tracing/faq/trace_sampling_and_storage.md index 78981da13947e..e913d61f4bfb3 100644 --- a/content/en/tracing/faq/trace_sampling_and_storage.md +++ b/content/en/tracing/faq/trace_sampling_and_storage.md @@ -198,8 +198,10 @@ package main import ( "log" "net/http" - "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext" - "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext" // 1.x + // "github.com/DataDog/dd-trace-go/v2/ddtrace/ext // 2.x + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" // 1.x + // "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer // 2.x ) func handler(w http.ResponseWriter, r *http.Request) { @@ -223,8 +225,10 @@ import ( "log" "net/http" - "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext" - "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext" // 1.x + // "github.com/DataDog/dd-trace-go/v2/ddtrace/ext" // 2.x + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" // 1.x + // "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" // 2.x ) func handler(w http.ResponseWriter, r *http.Request) { diff --git a/content/en/tracing/guide/configuring-primary-operation.md b/content/en/tracing/guide/configuring-primary-operation.md index dd0fb1e04bb21..dc41405ab7acc 100644 --- a/content/en/tracing/guide/configuring-primary-operation.md +++ b/content/en/tracing/guide/configuring-primary-operation.md @@ -69,7 +69,7 @@ See [Custom Instrumentation][3] for your programming language for detailed infor When using Datadog, the OpenTracing operation name is a resource and the OpenTracing "component" tag is Datadog's span name. For example, to define (in OpenTracing terms) a span that has the resource "/user/profile", and the span name "http.request": -{{< programming-lang-wrapper langs="java,python,ruby,go,nodejs,.NET,php,cpp" >}} +{{< programming-lang-wrapper langs="java,python,ruby,nodejs,.NET,php,cpp" >}} {{< programming-lang lang="java" >}} @@ -125,18 +125,6 @@ For more information, see [Setting up Ruby and OpenTracing][1]. [1]: /tracing/trace_collection/opentracing/ruby/#opentracing {{< /programming-lang >}} -{{< programming-lang lang="go" >}} - - -```go -opentracing.StartSpan("http.request", opentracer.ResourceName("/user/profile")) -``` - -For more information, see [Setting up Go and OpenTracing][1]. 
- - -[1]: /tracing/trace_collection/opentracing/go/#opentracing -{{< /programming-lang >}} {{< programming-lang lang="nodejs" >}} diff --git a/content/en/tracing/guide/inferred-service-opt-in.md b/content/en/tracing/guide/inferred-service-opt-in.md index 5cacf65e13654..3813fb1822122 100644 --- a/content/en/tracing/guide/inferred-service-opt-in.md +++ b/content/en/tracing/guide/inferred-service-opt-in.md @@ -1,8 +1,6 @@ --- title: Inferred Service dependencies - disable_toc: false -private: true further_reading: - link: "/tracing/services/" tag: "Documentation" @@ -18,10 +16,6 @@ further_reading: text: "Service Overrides" --- -{{< callout url="https://docs.google.com/forms/d/1imGm-4SfOPjwAr6fwgMgQe88mp4Y-n_zV0K3DcNW4UA/edit" d_target="#signupModal" btn_hidden="true" btn_hidden="false" header="Request access to the Preview!" >}} -Inferred service dependencies are in Preview. To request access, complete the form. -{{< /callout >}} - ## Overview Datadog can automatically discover the dependencies for an instrumented service, such as a database, a queue, or a third-party API, even if that dependency hasn't been instrumented yet. By analyzing outbound requests from your instrumented services, Datadog infers the presence of these dependencies and collects associated performance metrics. @@ -32,8 +26,6 @@ To determine the names and types of the inferred service dependencies, Datadog u If you're using the Go, Java, Node.js, PHP, .NET, or Ruby tracer, you can customize the default names for inferred entities. -**Note:** If you configure monitors, dashboards, or notebooks for a given inferred service during the Preview, you may need to update them if the naming scheme changes. Read more about migration steps in the [opt-in instructions](#opt-in). - ### Service page Dependency map Use the dependency map to visualize service-to-service communication and gain insight into system components such as databases, queues, and third-party dependencies. You can group dependencies by type and filter by Requests, Latency, or Errors to identify slow or failing connections. diff --git a/content/en/tracing/guide/instrument_custom_method.md b/content/en/tracing/guide/instrument_custom_method.md index 9cb9a37c7bf7d..8c91e002ab1d1 100644 --- a/content/en/tracing/guide/instrument_custom_method.md +++ b/content/en/tracing/guide/instrument_custom_method.md @@ -182,7 +182,10 @@ end ```go package ledger -import "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" +import ( + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" // 1.x + // "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" // 2.x +) // [...] diff --git a/content/en/tracing/guide/service_overrides.md b/content/en/tracing/guide/service_overrides.md index d248853053d02..af8e49b374f67 100644 --- a/content/en/tracing/guide/service_overrides.md +++ b/content/en/tracing/guide/service_overrides.md @@ -1,17 +1,12 @@ --- title: Service Overrides disable_toc: false -private: true further_reading: - link: "/tracing/guide/inferred-service-opt-in" tag: "Documentation" text: "Opting-in to the new service representation" --- -{{< callout url="https://docs.google.com/forms/d/1imGm-4SfOPjwAr6fwgMgQe88mp4Y-n_zV0K3DcNW4UA/edit" d_target="#signupModal" btn_hidden="false" header="Request access to the Preview!" >}} -Inferred service dependencies are in Preview. To request access, complete the form. For opt-in instructions, see the Inferred Service dependencies guide. -{{< /callout >}} - ## Overview [Inferred services][1] improve how Datadog represents service dependencies. 
This document explains the changes and how to adapt your configuration. diff --git a/content/en/tracing/guide/tutorial-enable-go-aws-ecs-ec2.md b/content/en/tracing/guide/tutorial-enable-go-aws-ecs-ec2.md index 9a45a7f1c6988..ff64e72c68b6f 100644 --- a/content/en/tracing/guide/tutorial-enable-go-aws-ecs-ec2.md +++ b/content/en/tracing/guide/tutorial-enable-go-aws-ecs-ec2.md @@ -160,10 +160,16 @@ To enable tracing support: 1. Tp enable automatic tracing, uncomment the following imports in `apm-tutorial-golang/cmd/notes/main.go`: {{< code-block lang="go" filename="cmd/notes/main.go">}} - sqltrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql" - chitrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/go-chi/chi" - httptrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http" - "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" + sqltrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql" // 1.x + chitrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/go-chi/chi" // 1.x + httptrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http" // 1.x + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" // 1.x + + // If you are using v2, the lines look like this: + // sqltrace "github.com/DataDog/dd-trace-go/contrib/database/sql/v2" // 2.x + // chitrace "github.com/DataDog/dd-trace-go/contrib/go-chi/chi/v2" // 2.x + // httptrace "github.com/DataDog/dd-trace-go/contrib/net/http/v2" // 2.x + // "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" // 2.x {{< /code-block >}} 1. In the `main()` function, uncomment the following lines: @@ -179,11 +185,11 @@ To enable tracing support: {{< /code-block >}} {{< code-block lang="go" filename="cmd/notes/main.go">}} - r.Use(chitrace.Middleware(chitrace.WithServiceName("notes"))){{< /code-block >}} + r.Use(chitrace.Middleware(chitrace.WithService("notes"))){{< /code-block >}} 1. In `setupDB()`, uncomment the following lines: {{< code-block lang="go" filename="cmd/notes/main.go">}} - sqltrace.Register("sqlite3", &sqlite3.SQLiteDriver{}, sqltrace.WithServiceName("db")) + sqltrace.Register("sqlite3", &sqlite3.SQLiteDriver{}, sqltrace.WithService("db")) db, err := sqltrace.Open("sqlite3", "file::memory:?cache=shared"){{< /code-block >}} {{< code-block lang="go" filename="cmd/notes/main.go">}} @@ -213,7 +219,9 @@ To enable tracing support: Also remove the comment around the following import: {{< code-block lang="go" disable_copy="true" filename="notes/notesController.go" collapsible="true" >}} - "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"{{< /code-block >}} + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" // 1.x + // "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" // 2.x + {{< /code-block >}} 1. The `doLongRunningProcess` function creates child spans from a parent context. Remove the comments to enable it: {{< code-block lang="go" filename="notes/notesHelper.go" disable_copy="true" collapsible="true" >}} diff --git a/content/en/tracing/guide/tutorial-enable-go-aws-ecs-fargate.md b/content/en/tracing/guide/tutorial-enable-go-aws-ecs-fargate.md index 7c5c002f7fbc2..adad01e443884 100644 --- a/content/en/tracing/guide/tutorial-enable-go-aws-ecs-fargate.md +++ b/content/en/tracing/guide/tutorial-enable-go-aws-ecs-fargate.md @@ -162,10 +162,16 @@ To enable tracing support: 1. 
Uncomment the following imports in `apm-tutorial-golang/cmd/notes/main.go`: {{< code-block lang="go" filename="cmd/notes/main.go">}} - sqltrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql" - chitrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/go-chi/chi" - httptrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http" - "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" + sqltrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql" // 1.x + chitrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/go-chi/chi" // 1.x + httptrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http" // 1.x + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" // 1.x + + // If you are using v2, the lines look like this: + // sqltrace "github.com/DataDog/dd-trace-go/contrib/database/sql/v2" // 2.x + // chitrace "github.com/DataDog/dd-trace-go/contrib/go-chi/chi/v2" // 2.x + // httptrace "github.com/DataDog/dd-trace-go/contrib/net/http/v2" // 2.x + // "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" // 2.x {{< /code-block >}} 1. In the `main()` function, uncomment the following lines: @@ -180,11 +186,11 @@ To enable tracing support: })){{< /code-block >}} {{< code-block lang="go" filename="cmd/notes/main.go" >}} - r.Use(chitrace.Middleware(chitrace.WithServiceName("notes"))){{< /code-block >}} + r.Use(chitrace.Middleware(chitrace.WithService("notes"))){{< /code-block >}} 1. In `setupDB()`, uncomment the following lines: {{< code-block lang="go" filename="cmd/notes/main.go" >}} - sqltrace.Register("sqlite3", &sqlite3.SQLiteDriver{}, sqltrace.WithServiceName("db")) + sqltrace.Register("sqlite3", &sqlite3.SQLiteDriver{}, sqltrace.WithService("db")) db, err := sqltrace.Open("sqlite3", "file::memory:?cache=shared"){{< /code-block >}} {{< code-block lang="go" filename="cmd/notes/main.go" >}} @@ -213,7 +219,9 @@ To enable tracing support: Also remove the comment around the following import: {{< code-block lang="go" disable_copy="true" filename="notes/notesController.go" collapsible="true" >}} - "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"{{< /code-block >}} + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" // 1.x + // "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" // 2.x + {{< /code-block >}} 1. The `doLongRunningProcess` function creates child spans from a parent context. Remove the comments to enable it: {{< code-block lang="go" filename="notes/notesHelper.go" disable_copy="true" collapsible="true" >}} diff --git a/content/en/tracing/guide/tutorial-enable-go-containers.md b/content/en/tracing/guide/tutorial-enable-go-containers.md index 92fa3a702605c..65974de7d6383 100644 --- a/content/en/tracing/guide/tutorial-enable-go-containers.md +++ b/content/en/tracing/guide/tutorial-enable-go-containers.md @@ -99,10 +99,16 @@ Next, configure the Go application to enable tracing. 
Because the Agent runs in To enable tracing support, uncomment the following imports in `apm-tutorial-golang/cmd/notes/main.go`: {{< code-block lang="go" filename="cmd/notes/main.go" >}} -sqltrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql" -chitrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/go-chi/chi" -httptrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http" -"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" + sqltrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql" // 1.x + chitrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/go-chi/chi" // 1.x + httptrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http" // 1.x + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" // 1.x + + // If you are using v2, the lines look like this: + // sqltrace "github.com/DataDog/dd-trace-go/contrib/database/sql/v2" // 2.x + // chitrace "github.com/DataDog/dd-trace-go/contrib/go-chi/chi/v2" // 2.x + // httptrace "github.com/DataDog/dd-trace-go/contrib/net/http/v2" // 2.x + // "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" // 2.x {{< /code-block >}} In the `main()` function, uncomment the following lines: @@ -119,13 +125,13 @@ client = httptrace.WrapClient(client, httptrace.RTWithResourceNamer(func(req *ht {{< /code-block >}} {{< code-block lang="go" filename="cmd/notes/main.go" >}} -r.Use(chitrace.Middleware(chitrace.WithServiceName("notes"))) +r.Use(chitrace.Middleware(chitrace.WithService("notes"))) {{< /code-block >}} In `setupDB()`, uncomment the following lines: {{< code-block lang="go" filename="cmd/notes/main.go" >}} -sqltrace.Register("sqlite3", &sqlite3.SQLiteDriver{}, sqltrace.WithServiceName("db")) +sqltrace.Register("sqlite3", &sqlite3.SQLiteDriver{}, sqltrace.WithService("db")) db, err := sqltrace.Open("sqlite3", "file::memory:?cache=shared") {{< /code-block >}} @@ -253,23 +259,28 @@ Datadog has several fully supported libraries for Go that allow for automatic tr import ( ... - sqltrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql" - chitrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/go-chi/chi" - httptrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http" + sqltrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql" // 1.x + chitrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/go-chi/chi" // 1.x + httptrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http" // 1.x + + // If you are using v2, the lines look like this: + // sqltrace "github.com/DataDog/dd-trace-go/contrib/database/sql/v2" // 2.x + // chitrace "github.com/DataDog/dd-trace-go/contrib/go-chi/chi/v2" // 2.x + // httptrace "github.com/DataDog/dd-trace-go/contrib/net/http/v2" // 2.x ... ) {{< /code-block >}} -In `cmd/notes/main.go`, the Datadog libraries are initialized with the `WithServiceName` option. For example, the `chitrace` library is initialized as follows: +In `cmd/notes/main.go`, the Datadog libraries are initialized with the `WithService` option. For example, the `chitrace` library is initialized as follows: {{< code-block lang="go" filename="cmd/notes/main.go" disable_copy="true" collapsible="true" >}} r := chi.NewRouter() r.Use(middleware.Logger) -r.Use(chitrace.Middleware(chitrace.WithServiceName("notes"))) +r.Use(chitrace.Middleware(chitrace.WithService("notes"))) r.Mount("/", nr.Register()) {{< /code-block >}} -Using `chitrace.WithServiceName("notes")` ensures that all elements traced by the library fall under the service name `notes`. +Using `chitrace.WithService("notes")` ensures that all elements traced by the library fall under the service name `notes`. 
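To see how these options fit together, the following is a condensed, hypothetical sketch of what `cmd/notes/main.go` can look like once the 1.x lines are uncommented. It is an illustration rather than the tutorial's actual file: the `github.com/go-chi/chi` and `github.com/mattn/go-sqlite3` import paths and the sample handler are assumptions, while the tracer, `sqltrace`, and `chitrace` calls mirror the snippets above.

```go
package main

import (
	"log"
	"net/http"

	"github.com/go-chi/chi"       // router used by the sample app (assumed import path)
	"github.com/mattn/go-sqlite3" // SQLite driver registered below (assumed import path)

	sqltrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql" // 1.x
	chitrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/go-chi/chi"   // 1.x
	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"                // 1.x
)

func main() {
	// Start the tracer before any instrumented component is used; stop it on shutdown.
	tracer.Start()
	defer tracer.Stop()

	// Database calls are traced under the "db" service.
	sqltrace.Register("sqlite3", &sqlite3.SQLiteDriver{}, sqltrace.WithService("db"))
	db, err := sqltrace.Open("sqlite3", "file::memory:?cache=shared")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// HTTP handlers are traced under the "notes" service by the chi middleware.
	r := chi.NewRouter()
	r.Use(chitrace.Middleware(chitrace.WithService("notes")))
	r.Get("/notes", func(w http.ResponseWriter, _ *http.Request) {
		w.Write([]byte("ok"))
	})

	log.Fatal(http.ListenAndServe(":8080", r))
}
```

The commented `// 2.x` paths above slot into the same structure; only the import lines change when moving to the `dd-trace-go/v2` module.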
The `main.go` file contains more implementation examples for each of these libraries. For an extensive list of libraries, see [Go Compatibility Requirements][16]. @@ -300,7 +311,8 @@ r.Delete("/notes/{noteID}", makeSpanMiddleware("DeleteNote", nr.DeleteNoteByID)) Also remove the comment around the following import: {{< code-block lang="go" filename="notes/notesController.go" disable_copy="true" collapsible="true" >}} -"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" +"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" // 1.x +// "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" // 2.x {{< /code-block >}} There are several examples of custom tracing in the sample application. Here are a couple more examples. Remove the comments to enable these spans: @@ -347,8 +359,10 @@ To enable tracing in the calendar application: 1. Uncomment the following lines in `cmd/calendar/main.go`: {{< code-block lang="go" filename="cmd/calendar/main.go" disable_copy="true" collapsible="true" >}} - chitrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/go-chi/chi" - "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" + chitrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/go-chi/chi" // 1.x + // chitrace "github.com/DataDog/dd-trace-go/contrib/go-chi/chi/v2" // 2.x + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" // 1.x + // "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" // 2.x {{< /code-block >}} {{< code-block lang="go" filename="cmd/calendar/main.go" disable_copy="true" collapsible="true" >}} @@ -357,7 +371,7 @@ To enable tracing in the calendar application: {{< /code-block >}} {{< code-block lang="go" filename="cmd/calendar/main.go" disable_copy="true" collapsible="true" >}} - r.Use(chitrace.Middleware(chitrace.WithServiceName("calendar"))) + r.Use(chitrace.Middleware(chitrace.WithService("calendar"))) {{< /code-block >}} 1. Open `docker/all-docker-compose.yaml` and uncomment the `calendar` service to set up the Agent host and Unified Service Tags for the app and for Docker: diff --git a/content/en/tracing/guide/tutorial-enable-go-host.md b/content/en/tracing/guide/tutorial-enable-go-host.md index ea2025704cf47..ff808666f0629 100644 --- a/content/en/tracing/guide/tutorial-enable-go-host.md +++ b/content/en/tracing/guide/tutorial-enable-go-host.md @@ -96,17 +96,24 @@ make exitNotes Next, install the Go tracer. From your `apm-tutorial-golang` directory, run: {{< code-block lang="shell" >}} -go get gopkg.in/DataDog/dd-trace-go.v1/ddtrace +go get gopkg.in/DataDog/dd-trace-go.v1/ddtrace # 1.x +# go get github.com/DataDog/dd-trace-go/v2/ddtrace # 2.x {{< /code-block >}} Now that the tracing library has been added to `go.mod`, enable tracing support.
Uncomment the following imports in `apm-tutorial-golang/cmd/notes/main.go`: {{< code-block lang="go" filename="cmd/notes/main.go" >}} - sqltrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql" - chitrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/go-chi/chi" - httptrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http" - "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" + sqltrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql" // 1.x + chitrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/go-chi/chi" // 1.x + httptrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http" // 1.x + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" // 1.x + + // If you are using v2, the lines look like this: + // sqltrace "github.com/DataDog/dd-trace-go/contrib/database/sql/v2" // 2.x + // chitrace "github.com/DataDog/dd-trace-go/contrib/go-chi/chi/v2" // 2.x + // httptrace "github.com/DataDog/dd-trace-go/contrib/net/http/v2" // 2.x + // "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" // 2.x "fmt" {{< /code-block >}} @@ -135,13 +142,13 @@ client = httptrace.WrapClient(client, httptrace.RTWithResourceNamer(func(req *ht {{< /code-block >}} {{< code-block lang="go" filename="cmd/notes/main.go">}} -r.Use(chitrace.Middleware(chitrace.WithServiceName("notes"))) +r.Use(chitrace.Middleware(chitrace.WithService("notes"))) {{< /code-block >}} In `setupDB()`, uncomment the following lines: {{< code-block lang="go" filename="cmd/notes/main.go">}} -sqltrace.Register("sqlite3", &sqlite3.SQLiteDriver{}, sqltrace.WithServiceName("db")) +sqltrace.Register("sqlite3", &sqlite3.SQLiteDriver{}, sqltrace.WithService("db")) db, err := sqltrace.Open("sqlite3", "file::memory:?cache=shared") {{< /code-block >}} @@ -216,23 +223,28 @@ Datadog has several fully supported libraries for Go that allow for automatic tr import ( ... - sqltrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql" - chitrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/go-chi/chi" - httptrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http" + sqltrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql" // 1.x + chitrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/go-chi/chi" // 1.x + httptrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http" // 1.x + + // If you are using v2, the lines look like this: + // sqltrace "github.com/DataDog/dd-trace-go/contrib/database/sql/v2" // 2.x + // chitrace "github.com/DataDog/dd-trace-go/contrib/go-chi/chi/v2" // 2.x + // httptrace "github.com/DataDog/dd-trace-go/contrib/net/http/v2" // 2.x ... ) {{< /code-block >}} -In `cmd/notes/main.go`, the Datadog libraries are initialized with the `WithServiceName` option. For example, the `chitrace` library is initialized as follows: +In `cmd/notes/main.go`, the Datadog libraries are initialized with the `WithService` option. For example, the `chitrace` library is initialized as follows: {{< code-block lang="go" filename="main.go" disable_copy="true" collapsible="true" >}} r := chi.NewRouter() r.Use(middleware.Logger) -r.Use(chitrace.Middleware(chitrace.WithServiceName("notes"))) +r.Use(chitrace.Middleware(chitrace.WithService("notes"))) r.Mount("/", nr.Register()) {{< /code-block >}} -Using `chitrace.WithServiceName("notes")` ensures that all elements traced by the library fall under the service name `notes`. +Using `chitrace.WithService("notes")` ensures that all elements traced by the library fall under the service name `notes`. The `main.go` file contains more implementation examples for each of these libraries. 
For an extensive list of libraries, see [Go Compatibility Requirements][16]. @@ -263,7 +275,8 @@ Remove the comments around the following lines: Also remove the comment around the following import: {{< code-block lang="go" filename="notes/notesController.go" disable_copy="true" collapsible="true" >}} -"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" +"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" // 1.x +// "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" // 2.x {{< /code-block >}} There are several examples of custom tracing in the sample application. Here are a couple more examples. Remove the comments to enable these spans: @@ -301,8 +314,10 @@ func privateMethod1(ctx context.Context) { Uncomment the following imports: {{< code-block lang="go" filename="notes/notesHelper.go" disable_copy="true" collapsible="true" >}} - "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext" - "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext" // 1.x + // "github.com/DataDog/dd-trace-go/v2/ddtrace/ext" // 2.x + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" // 1.x + // "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" // 2.x {{< /code-block >}} Launch the application with `make runNotes` and try the `curl` commands again to observe the custom spans and traces you've just configured: @@ -332,8 +347,10 @@ The sample project includes a second application called `calendar` that returns To enable tracing in the calendar application, uncomment the following lines in `cmd/calendar/main.go`: {{< code-block lang="go" filename="cmd/calendar/main.go" disable_copy="true" collapsible="true" >}} - chitrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/go-chi/chi" - "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" + chitrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/go-chi/chi" // 1.x + // chitrace "github.com/DataDog/dd-trace-go/contrib/go-chi/chi/v2" // 2.x + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" // 1.x + // "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" // 2.x {{< /code-block >}} {{< code-block lang="go" filename="cmd/calendar/main.go" disable_copy="true" collapsible="true" >}} @@ -342,7 +359,7 @@ To enable tracing in the calendar application, uncomment the following lines in {{< /code-block >}} {{< code-block lang="go" filename="cmd/calendar/main.go" disable_copy="true" collapsible="true" >}} - r.Use(chitrace.Middleware(chitrace.WithServiceName("calendar"))) + r.Use(chitrace.Middleware(chitrace.WithService("calendar"))) {{< /code-block >}} 1. If the notes application is still running, use `make exitNotes` to stop it. 
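Because the diff above only shows the imports for the custom tracing helpers, here is a hedged sketch of what a helper in the spirit of the tutorial's `doLongRunningProcess` and `privateMethod1` can look like with the 1.x API. The helper names come from the tutorial, but the bodies, operation names, and resource name below are illustrative, not copied from the sample repository.

```go
// Package notes is a stand-in for the tutorial's notes package.
package notes

import (
	"context"
	"time"

	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"    // 1.x
	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" // 1.x
)

// doLongRunningProcess opens a child span under whatever span is already
// stored in ctx, then hands the returned ctx to nested work so further
// children attach beneath it.
func doLongRunningProcess(ctx context.Context) (err error) {
	span, ctx := tracer.StartSpanFromContext(ctx, "notes.doLongRunningProcess",
		tracer.SpanType(ext.SpanTypeWeb),            // span type constant from the ext package
		tracer.ResourceName("long running process"), // illustrative resource name
	)
	// Finish records the duration and, if err is non-nil, marks the span as errored.
	defer func() { span.Finish(tracer.WithError(err)) }()

	return privateMethod1(ctx)
}

// privateMethod1 creates a nested child span of its own.
func privateMethod1(ctx context.Context) error {
	child, _ := tracer.StartSpanFromContext(ctx, "notes.privateMethod1")
	defer child.Finish()

	time.Sleep(50 * time.Millisecond) // stand-in for real work
	return nil
}
```

Passing the returned `ctx` to nested calls is what lets further child spans attach beneath the one created here.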
diff --git a/content/en/tracing/legacy_app_analytics/_index.md b/content/en/tracing/legacy_app_analytics/_index.md index e2377963cada6..9997561db7dec 100644 --- a/content/en/tracing/legacy_app_analytics/_index.md +++ b/content/en/tracing/legacy_app_analytics/_index.md @@ -59,7 +59,7 @@ Datadog.configure { |c| c.tracing.analytics.enabled = true } App Analytics is available starting in version 1.11.0 of the Go tracing client, and can be enabled globally for all **web** integrations using: -* the [`WithAnalytics`][1] tracer start option, for example: +* the [`WithAnalytics`][1] ([v2 documentation][2]) tracer start option, for example: ```go tracer.Start(tracer.WithAnalytics(true)) @@ -68,6 +68,7 @@ App Analytics is available starting in version 1.11.0 of the Go tracing client, * starting in version 1.26.0 using environment variable: `DD_TRACE_ANALYTICS_ENABLED=true` [1]: https://pkg.go.dev/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer#WithAnalytics +[2]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer#WithAnalytics {{< /programming-lang >}} {{< programming-lang lang="nodejs" >}} @@ -191,8 +192,10 @@ In addition to the global setting, you can enable or disable App Analytics indiv package main import ( - httptrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http" - "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" + httptrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http" // 1.x + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" // 1.x + // httptrace "github.com/DataDog/dd-trace-go/contrib/net/http/v2" // 2.x + // "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" // 2.x ) func main() { diff --git a/content/en/tracing/metrics/runtime_metrics/go.md b/content/en/tracing/metrics/runtime_metrics/go.md index 064f118537707..75479b60ffbac 100644 --- a/content/en/tracing/metrics/runtime_metrics/go.md +++ b/content/en/tracing/metrics/runtime_metrics/go.md @@ -28,7 +28,7 @@ tracer.Start(tracer.WithRuntimeMetrics()) View runtime metrics in correlation with your Go services on the [Service Catalog][1] in Datadog. -By default, runtime metrics from your application are sent every 10 seconds to the Datadog Agent with DogStatsD. Make sure that [DogStatsD is enabled for the Agent][2]. If your Datadog Agent DogStatsD address differs from the default `localhost:8125`, use the [`WithDogstatsdAddress`][3] option (available starting in 1.18.0) or the environment variables `DD_AGENT_HOST` and `DD_DOGSTATSD_PORT`. +By default, runtime metrics from your application are sent every 10 seconds to the Datadog Agent with DogStatsD. Make sure that [DogStatsD is enabled for the Agent][2]. If your Datadog Agent DogStatsD address differs from the default `localhost:8125`, use the [`WithDogstatsdAddress`][3] (or [`WithDogstatsdAddress` v2][9]) option (available starting in 1.18.0) or the environment variables `DD_AGENT_HOST` and `DD_DOGSTATSD_PORT`. If `WithDogstatsdAddress` is not used, the tracer attempts to determine the address of the statsd service according to the following rules: 1. Look for `/var/run/datadog/dsd.socket` and use it if present. IF NOT, continue to #2. 
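As a concrete illustration of the options described above, a 1.x tracer start call that enables runtime metrics and points DogStatsD at a non-default address could look like the following sketch. The address is a placeholder; if the Agent listens on the default `localhost:8125`, or if `DD_AGENT_HOST` and `DD_DOGSTATSD_PORT` are set, no explicit address is needed.

```go
package main

import (
	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" // 1.x
	// "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" // 2.x (see the v2 link above)
)

func main() {
	tracer.Start(
		tracer.WithRuntimeMetrics(),                         // send runtime metrics to the Agent over DogStatsD every 10 seconds
		tracer.WithDogstatsdAddress("dogstatsd.local:8125"), // placeholder address; omit when the default localhost:8125 applies
	)
	defer tracer.Stop()

	// ... application code ...
}
```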
@@ -61,4 +61,5 @@ Along with displaying these metrics in your APM Service Page, Datadog provides a [5]: /developers/dogstatsd/?tab=kubernetes#agent [6]: /agent/amazon_ecs/#create-an-ecs-task [7]: https://app.datadoghq.com/dash/integration/30587/go-runtime-metrics -[8]: /developers/dogstatsd/unix_socket/ \ No newline at end of file +[8]: /developers/dogstatsd/unix_socket/ +[9]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer#WithDogstatsdAddress \ No newline at end of file diff --git a/content/en/tracing/other_telemetry/connect_logs_and_traces/go.md b/content/en/tracing/other_telemetry/connect_logs_and_traces/go.md index 04e04e1d6cc35..2f1e4987599f0 100644 --- a/content/en/tracing/other_telemetry/connect_logs_and_traces/go.md +++ b/content/en/tracing/other_telemetry/connect_logs_and_traces/go.md @@ -31,7 +31,8 @@ package main import ( "net/http" - "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" // 1.x + // "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" // 2.x ) func handler(w http.ResponseWriter, r *http.Request) { @@ -59,8 +60,10 @@ package main import ( "github.com/sirupsen/logrus" - dd_logrus "gopkg.in/DataDog/dd-trace-go.v1/contrib/sirupsen/logrus" - "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" + dd_logrus "gopkg.in/DataDog/dd-trace-go.v1/contrib/sirupsen/logrus" // 1.x + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" // 1.x + // dd_logrus "github.com/DataDog/dd-trace-go/contrib/sirupsen/logrus/v2" // 2.x + // "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" // 2.x ) func main() { diff --git a/content/en/tracing/trace_collection/automatic_instrumentation/dd_libraries/android.md b/content/en/tracing/trace_collection/automatic_instrumentation/dd_libraries/android.md index 6fbd106e0afe8..da35afdd08e94 100644 --- a/content/en/tracing/trace_collection/automatic_instrumentation/dd_libraries/android.md +++ b/content/en/tracing/trace_collection/automatic_instrumentation/dd_libraries/android.md @@ -307,12 +307,16 @@ dependencies { {{< tabs >}} {{% tab "Kotlin" %}} ```kotlin +import io.opentracing.util.GlobalTracer + val tracer = AndroidTracer.Builder().build() GlobalTracer.registerIfAbsent(tracer) ``` {{% /tab %}} {{% tab "Java" %}} ```java +import io.opentracing.util.GlobalTracer; + AndroidTracer tracer = new AndroidTracer.Builder().build(); GlobalTracer.registerIfAbsent(tracer); ``` @@ -947,4 +951,4 @@ The following methods in `AndroidTracer.Builder` can be used when initializing t [9]: https://github.com/square/retrofit/tree/master/retrofit-adapters/rxjava3 [10]: /tracing/trace_collection/custom_instrumentation/android/otel [11]: https://opentracing.io -[12]: /real_user_monitoring/error_tracking/mobile/android/?tab=us#upload-your-mapping-file \ No newline at end of file +[12]: /real_user_monitoring/error_tracking/mobile/android/?tab=us#upload-your-mapping-file diff --git a/content/en/tracing/trace_collection/automatic_instrumentation/dd_libraries/go.md b/content/en/tracing/trace_collection/automatic_instrumentation/dd_libraries/go.md index 15ea1f21d9342..3760d47e4b600 100644 --- a/content/en/tracing/trace_collection/automatic_instrumentation/dd_libraries/go.md +++ b/content/en/tracing/trace_collection/automatic_instrumentation/dd_libraries/go.md @@ -18,6 +18,9 @@ further_reading: - link: "https://pkg.go.dev/gopkg.in/DataDog/dd-trace-go.v1/ddtrace" tag: "External Site" text: "Tracer library API documentation" +- link: "https://pkg.go.dev/github.com/DataDog/dd-trace-go/v2/ddtrace" + tag: "External Site" + text: 
"Tracer library API documentation for v2" - link: https://github.com/DataDog/orchestrion tag: "Source Code" text: "Orchestrion source code" @@ -26,6 +29,11 @@ further_reading: text: "Explore your services, resources and traces" --- +
+The Go Tracer v2 is in Preview! See the migration guide to upgrade. +
+ + ## Compatibility requirements The Go Tracer requires Go `1.18+` and Datadog Agent `>= 5.21.1`. For a full list of Datadog's Go version and framework support (including legacy and maintenance versions), see the [Compatibility Requirements][1] page. @@ -223,7 +231,7 @@ Some of the instrumentation performed by `orchestrion` is done callee-side (or l #### Use the tracing library -You can use the [tracing library][4] in your Orchestrion-built application. This is useful for instrumenting frameworks not yet supported by Orchestrion. However, be aware that this may result in duplicated trace spans in the future as Orchestrion support expands. Review the [release notes][11] when updating your `orchestrion` dependency to stay informed about new features and adjust your manual instrumentation as necessary. +You can use the [tracing library][4] ([or the v2 tracing library][5]) in your Orchestrion-built application. This is useful for instrumenting frameworks not yet supported by Orchestrion. However, be aware that this may result in duplicated trace spans in the future as Orchestrion support expands. Review the [release notes][11] when updating your `orchestrion` dependency to stay informed about new features and adjust your manual instrumentation as necessary. #### Use the continuous profiler @@ -235,6 +243,7 @@ To enable the profiler, set the environment variable `DD_PROFILING_ENABLED=true` To troubleshoot builds that `orchestrion` manages, see [Troubleshooting Go Compile-Time Instrumentation][13]. [4]: https://pkg.go.dev/gopkg.in/DataDog/dd-trace-go.v1/ddtrace +[5]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/v2/ddtrace [6]: https://github.com/DataDog/orchestrion [7]: /security/application_security/threats/exploit-prevention [8]: https://go.dev/doc/devel/release#policy @@ -253,7 +262,7 @@ To troubleshoot builds that `orchestrion` manages, see [Troubleshooting Go Compi ### Add the tracer library to your application -First, import and start the tracer in your code following the [Library Configuration][3] documentation. Refer to the [API documentation][4] for configuration instructions and details about using the API. +First, import and start the tracer in your code following the [Library Configuration][3] documentation. Refer to the [API documentation][4] (or the [API documentation v2][6]) for configuration instructions and details about using the API. ### Activate Go integrations to create spans @@ -262,6 +271,7 @@ Activate [Go integrations][1] to generate spans. Datadog has a series of pluggab [1]: /tracing/compatibility_requirements/go [3]: /tracing/trace_collection/library_config/go/ [4]: https://pkg.go.dev/gopkg.in/DataDog/dd-trace-go.v1/ddtrace +[6]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/v2/ddtrace {{% /tab %}} diff --git a/content/en/tracing/trace_collection/compatibility/go.md b/content/en/tracing/trace_collection/compatibility/go.md index 819e67a1dfe3d..d6f9f20075639 100644 --- a/content/en/tracing/trace_collection/compatibility/go.md +++ b/content/en/tracing/trace_collection/compatibility/go.md @@ -26,12 +26,32 @@ The Go Datadog Trace Library has a [version support policy][2] defined for Go ve * [Automatically at compile time using `orchestrion`][78] * [Manually add and initialize the Datadog Go tracer][77] +### Go Tracer Support + +The Go Tracer v2 is in Preview! See the migration guide to upgrade. 
Support for each version of the Go Tracer is as follows: + +| Version | Preview | General Availability (GA) | Maintenance | End-of-life (EOL) | +|---------|------------|----------------------------|-------------|-------------------| +| v2 | 2024-11-27 | TBD | TBD | TBD | +| v1 | 2018-06-06 | 2018-06-06 | 2025-02-05 | 2025-12-31 | +| v0 | 2016-12-12 | 2016-12-12 | 2018-06-06 | 2019-06-06 | + +| Level | Support provided | +|---------------------------|---------------------------------------------------------| +| Unsupported | No implementation. Contact [Datadog support][2] for special requests. | +| Preview | Initial implementation. May not yet contain all features. Support for new features and bug and security fixes are provided on a best-effort basis.| +| General Availability (GA) | Full implementation of all features. Full support for new features and bug and security fixes.| +| Maintenance | Full implementation of existing features. Does not receive new features. Support for bug and security fixes only.| +| End-of-life (EOL) | No support. | + ### Integrations #### Framework compatibility Integrate the Go tracer with the following list of web frameworks using one of the following helper packages. +{{< tabs >}} +{{% tab "v1" %}} **Note**: The [integrations documentation][5] provides a detailed overview of the supported packages and their APIs, along with usage examples. | Framework | Support Type | GoDoc Datadog Documentation | @@ -52,40 +72,37 @@ Integrate the Go tracer with the following list of web frameworks using one of t The Go tracer includes support for the following data stores and libraries. -| Library | Support Type | Examples and Documentation | -|--------------------------|------------------------------------------------------|---------------------------------------------------------------------------------| -| [AWS SDK][20] | Manual or Compile-Time | [gopkg.in/DataDog/dd-trace-go.v1/contrib/aws/aws-sdk-go/aws][21] | -| [AWS SDK v2][75] | Manual or Compile-Time | [gopkg.in/DataDog/dd-trace-go.v1/contrib/aws/aws-sdk-go-v2/aws][76] | -| [Elasticsearch][22] | Manual or Compile-Time | [gopkg.in/DataDog/dd-trace-go.v1/contrib/olivere/elastic][23] | -| [Cassandra][24] | Manual or Compile-Time | [gopkg.in/DataDog/dd-trace-go.v1/contrib/gocql/gocql][25] | -| [GraphQL][26] | Manual or Compile-Time | [gopkg.in/DataDog/dd-trace-go.v1/contrib/graph-gophers/graphql-go][27] | -| [HTTP][28] | Manual or Compile-Time[🔹](#library-side) | [gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http][29] | -| [HTTP router][30] | Manual or Compile-Time[🔹](#library-side) | [gopkg.in/DataDog/dd-trace-go.v1/contrib/julienschmidt/httprouter][31] | -| [Redis (go-redis)][32] | Manual or Compile-Time | [gopkg.in/DataDog/dd-trace-go.v1/contrib/go-redis/redis][33] | -| [Redis (go-redis-v8)][34]| Manual or Compile-Time | [gopkg.in/DataDog/dd-trace-go.v1/contrib/go-redis/redis.v8][35] | -| [Redis (redigo)][36] | Manual | [gopkg.in/DataDog/dd-trace-go.v1/contrib/garyburd/redigo][37] | -| [Redis (new redigo)][38] | Manual or Compile-Time | [gopkg.in/DataDog/dd-trace-go.v1/contrib/gomodule/redigo][39] | -| [SQL][40] | Manual or Compile-Time | [gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql][41] | -| [SQLx][42] | Manual or Compile-Time | [gopkg.in/DataDog/dd-trace-go.v1/contrib/jmoiron/sqlx][43] | -| [MongoDB][44] | Manual or Compile-Time | [gopkg.in/DataDog/dd-trace-go.v1/contrib/go.mongodb.org/mongo-driver/mongo][45] | -| [MongoDB (mgo)][73] | Manual | [gopkg.in/DataDog/dd-trace-go.v1/contrib/globalsign/mgo][46] | 
-| [BuntDB][47] | Manual | [gopkg.in/DataDog/dd-trace-go.v1/contrib/tidwall/buntdb][48] | -| [LevelDB][49] | Manual | [gopkg.in/DataDog/dd-trace-go.v1/contrib/syndtr/goleveldb/leveldb][50] | -| [miekg/dns][51] | Manual | [gopkg.in/DataDog/dd-trace-go.v1/contrib/miekg/dns][52] | -| [Kafka (confluent)][53] | Manual or Compile-Time[🔹](#library-side) | [gopkg.in/DataDog/dd-trace-go.v1/contrib/confluentinc/confluent-kafka-go][54] | -| [Kafka (sarama)][55] | Manual or Compile-Time | [gopkg.in/DataDog/dd-trace-go.v1/contrib/IBM/sarama.v1][56] | -| [Google API][57] | Manual or Compile-Time | [gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/api][58] | -| [go-restful][59] | Manual | [gopkg.in/DataDog/dd-trace-go.v1/contrib/emicklei/go-restful][60] | -| [Twirp][61] | Manual or Compile-Time | [gopkg.in/DataDog/dd-trace-go.v1/contrib/twitchtv/twirp][62] | -| [Vault][63] | Manual or Compile-Time | [gopkg.in/DataDog/dd-trace-go.v1/contrib/hashicorp/vault][64] | -| [Consul][65] | Manual | [gopkg.in/DataDog/dd-trace-go.v1/contrib/hashicorp/consul][66] | -| [Gorm][67] | Manual or Compile-Time | [gopkg.in/DataDog/dd-trace-go.v1/contrib/jinzhu/gorm][68] | -| [Gorm v2][69] | Manual or Compile-Time | [gopkg.in/DataDog/dd-trace-go.v1/contrib/gorm.io/gorm.v1][70] | -| [Kubernetes][71] | Manual or Compile-Time | [gopkg.in/DataDog/dd-trace-go.v1/contrib/k8s.io/client-go/kubernetes][72] | -| [Memcache][73] | Manual | [gopkg.in/DataDog/dd-trace-go.v1/contrib/bradfitz/gomemcache/memcache][74] | - - -🔹 Compile-time instrumentation is done directly within the library, and cannot be locally opted out of using the `//orchestrion:ignore` directive. +| Library | Support Type | Examples and Documentation | +|-------------------------|-----------------|---------------------------------------------------------------------------------| +| [AWS SDK][20] | Fully Supported | [gopkg.in/DataDog/dd-trace-go.v1/contrib/aws/aws-sdk-go/aws][21] | +| [AWS SDK v2][75] | Fully Supported | [gopkg.in/DataDog/dd-trace-go.v1/contrib/aws/aws-sdk-go-v2/aws][76] | +| [Elasticsearch][22] | Fully Supported | [gopkg.in/DataDog/dd-trace-go.v1/contrib/olivere/elastic][23] | +| [Cassandra][24] | Fully Supported | [gopkg.in/DataDog/dd-trace-go.v1/contrib/gocql/gocql][25] | +| [GraphQL][26] | Fully Supported | [gopkg.in/DataDog/dd-trace-go.v1/contrib/graph-gophers/graphql-go][27] | +| [HTTP][28] | Fully Supported | [gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http][29] | +| [HTTP router][30] | Fully Supported | [gopkg.in/DataDog/dd-trace-go.v1/contrib/julienschmidt/httprouter][31] | +| [Redis (go-redis)][32] | Fully Supported | [gopkg.in/DataDog/dd-trace-go.v1/contrib/go-redis/redis][33] | +| [Redis (go-redis-v8)][34]| Fully Supported | [gopkg.in/DataDog/dd-trace-go.v1/contrib/go-redis/redis.v8][35] | +| [Redis (redigo)][36] | Fully Supported | [gopkg.in/DataDog/dd-trace-go.v1/contrib/garyburd/redigo][37] | +| [Redis (new redigo)][38]| Fully Supported | [gopkg.in/DataDog/dd-trace-go.v1/contrib/gomodule/redigo][39] | +| [SQL][40] | Fully Supported | [gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql][41] | +| [SQLx][42] | Fully Supported | [gopkg.in/DataDog/dd-trace-go.v1/contrib/jmoiron/sqlx][43] | +| [MongoDB][44] | Fully Supported | [gopkg.in/DataDog/dd-trace-go.v1/contrib/go.mongodb.org/mongo-driver/mongo][45] | +| [MongoDB (mgo)][73] | Fully Supported | [gopkg.in/DataDog/dd-trace-go.v1/contrib/globalsign/mgo][46] | +| [BuntDB][47] | Fully Supported | [gopkg.in/DataDog/dd-trace-go.v1/contrib/tidwall/buntdb][48] | +| [LevelDB][49] | Fully 
Supported | [gopkg.in/DataDog/dd-trace-go.v1/contrib/syndtr/goleveldb/leveldb][50] | +| [miekg/dns][51] | Fully Supported | [gopkg.in/DataDog/dd-trace-go.v1/contrib/miekg/dns][52] | +| [Kafka (confluent)][53] | Fully Supported | [gopkg.in/DataDog/dd-trace-go.v1/contrib/confluentinc/confluent-kafka-go][54] | +| [Kafka (sarama)][55] | Fully Supported | [gopkg.in/DataDog/dd-trace-go.v1/contrib/Shopify/sarama][56] | +| [Google API][57] | Fully Supported | [gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/api][58] | +| [go-restful][59] | Fully Supported | [gopkg.in/DataDog/dd-trace-go.v1/contrib/emicklei/go-restful][60] | +| [Twirp][61] | Fully Supported | [gopkg.in/DataDog/dd-trace-go.v1/contrib/twitchtv/twirp][62] | +| [Vault][63] | Fully Supported | [gopkg.in/DataDog/dd-trace-go.v1/contrib/hashicorp/vault][64] | +| [Consul][65] | Fully Supported | [gopkg.in/DataDog/dd-trace-go.v1/contrib/hashicorp/consul][66] | +| [Gorm][67] | Fully Supported | [gopkg.in/DataDog/dd-trace-go.v1/contrib/jinzhu/gorm][68] | +| [Gorm v2][69] | Fully Supported | [gopkg.in/DataDog/dd-trace-go.v1/contrib/gorm.io/gorm.v1][70] | +| [Kubernetes][71] | Fully Supported | [gopkg.in/DataDog/dd-trace-go.v1/contrib/k8s.io/client-go/kubernetes][72] | +| [Memcache][73] | Fully Supported | [gopkg.in/DataDog/dd-trace-go.v1/contrib/bradfitz/gomemcache/memcache][74] | Packages must be imported with: @@ -93,15 +110,6 @@ Packages must be imported with: ```go import "gopkg.in/DataDog/dd-trace-go.v1/contrib//" ``` - - -## Further Reading - -{{< partial name="whats-next/whats-next.html" >}} - -[1]: https://github.com/DataDog/dd-trace-go -[2]: https://github.com/DataDog/dd-trace-go?tab=readme-ov-file#go-support-policy -[4]: https://www.datadoghq.com/support/ [5]: https://pkg.go.dev/gopkg.in/DataDog/dd-trace-go.v1/contrib [6]: https://gin-gonic.com [7]: https://pkg.go.dev/gopkg.in/DataDog/dd-trace-go.v1/contrib/gin-gonic/gin @@ -152,8 +160,8 @@ import "gopkg.in/DataDog/dd-trace-go.v1/contrib//" [52]: https://pkg.go.dev/gopkg.in/DataDog/dd-trace-go.v1/contrib/miekg/dns [53]: https://github.com/confluentinc/confluent-kafka-go [54]: https://pkg.go.dev/gopkg.in/DataDog/dd-trace-go.v1/contrib/confluentinc/confluent-kafka-go -[55]: https://github.com/IBM/sarama -[56]: https://pkg.go.dev/gopkg.in/DataDog/dd-trace-go.v1/contrib/IBM/sarama.v1 +[55]: https://github.com/Shopify/sarama +[56]: https://pkg.go.dev/gopkg.in/DataDog/dd-trace-go.v1/contrib/Shopify/sarama [57]: https://github.com/googleapis/google-api-go-client [58]: https://pkg.go.dev/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/api [59]: https://github.com/emicklei/go-restful @@ -174,5 +182,140 @@ import "gopkg.in/DataDog/dd-trace-go.v1/contrib//" [74]: https://pkg.go.dev/gopkg.in/DataDog/dd-trace-go.v1/contrib/bradfitz/gomemcache/memcache [75]: https://aws.github.io/aws-sdk-go-v2/docs/ [76]: https://pkg.go.dev/gopkg.in/DataDog/dd-trace-go.v1/contrib/aws/aws-sdk-go-v2/aws +{{% /tab %}} +{{% tab "v2" %}} + +**Note**: The [integrations documentation][79] provides a detailed overview of the supported packages and their APIs, along with usage examples. 
+ +| Framework | Support Type | GoDoc Datadog Documentation | +|-------------------|-----------------|--------------------------------------------------------------------------| +| [Gin][6] | Fully Supported | [github.com/DataDog/dd-trace-go/contrib/gin-gonic/gin/v2][80] | +| [Gorilla Mux][8] | Fully Supported | [github.com/DataDog/dd-trace-go/contrib/gorilla/mux/v2][81] | +| [gRPC][10] | Fully Supported | [github.com/DataDog/dd-trace-go/contrib/google.golang.org/grpc/v2][82] | +| [chi][13] | Fully Supported | [github.com/DataDog/dd-trace-go/contrib/go-chi/chi/v2][83] | +| [echo v4][15] | Fully Supported | [github.com/DataDog/dd-trace-go/contrib/labstack/echo.v4/v2][84] | +| [Fiber][18] | Fully Supported | [github.com/DataDog/dd-trace-go/contrib/gofiber/fiber.v2/v2][85] | + +#### Library compatibility + +The Go tracer includes support for the following data stores and libraries. + +| Library | Support Type | Examples and Documentation | +|-------------------------|-----------------|---------------------------------------------------------------------------------| +| [AWS SDK][20] | Fully Supported | [github.com/DataDog/dd-trace-go/contrib/aws/aws-sdk-go/aws/v2][86] | +| [AWS SDK v2][75] | Fully Supported | [github.com/DataDog/dd-trace-go/contrib/aws/aws-sdk-go-v2/aws/v2][113] | +| [Elasticsearch][22] | Fully Supported | [github.com/DataDog/dd-trace-go/contrib/olivere/elastic.v5/v2][87] | +| [Cassandra][24] | Fully Supported | [github.com/DataDog/dd-trace-go/contrib/gocql/gocql/v2][88] | +| [GraphQL][26] | Fully Supported | [github.com/DataDog/dd-trace-go/contrib/graph-gophers/graphql-go/v2][89] | +| [HTTP][28] | Fully Supported | [github.com/DataDog/dd-trace-go/contrib/net/http/v2][90] | +| [HTTP router][30] | Fully Supported | [github.com/DataDog/dd-trace-go/contrib/julienschmidt/httprouter/v2][91] | +| [Redis (go-redis)][32] | Fully Supported | [github.com/DataDog/dd-trace-go/contrib/go-redis/redis/v2][92] | +| [Redis (go-redis-v8)][34]| Fully Supported | [github.com/DataDog/dd-trace-go/contrib/go-redis/redis.v8/v2][93] | +| [Redis (redigo)][36] | Fully Supported | [github.com/DataDog/dd-trace-go/contrib/garyburd/redigo/v2][94] | +| [Redis (new redigo)][38]| Fully Supported | [github.com/DataDog/dd-trace-go/contrib/gomodule/redigo/v2][95] | +| [SQL][40] | Fully Supported | [github.com/DataDog/dd-trace-go/contrib/database/sql/v2][96] | +| [SQLx][42] | Fully Supported | [github.com/DataDog/dd-trace-go/contrib/jmoiron/sqlx/v2][97] | +| [MongoDB][44] | Fully Supported | [github.com/DataDog/dd-trace-go/contrib/go.mongodb.org/mongo-driver/mongo/v2][98] | +| [MongoDB (mgo)][73] | Fully Supported | [github.com/DataDog/dd-trace-go/contrib/globalsign/mgo/v2][99] | +| [BuntDB][47] | Fully Supported | [github.com/DataDog/dd-trace-go/contrib/tidwall/buntdb/v2][100] | +| [LevelDB][49] | Fully Supported | [github.com/DataDog/dd-trace-go/contrib/syndtr/goleveldb/leveldb/v2][101] | +| [miekg/dns][51] | Fully Supported | [github.com/DataDog/dd-trace-go/contrib/miekg/dns/v2][102] | +| [Kafka (confluent)][53] | Fully Supported | [github.com/DataDog/dd-trace-go/contrib/confluentinc/confluent-kafka-go/v2][103] | +| [Kafka (sarama)][55] | Fully Supported | [github.com/DataDog/dd-trace-go/contrib/IBM/sarama/v2][104] | +| [Google API][57] | Fully Supported | [github.com/DataDog/dd-trace-go/contrib/google.golang.org/api/v2][105] | +| [go-restful][59] | Fully Supported | [github.com/DataDog/dd-trace-go/contrib/emicklei/go-restful.v3/v2][106] | +| [Twirp][61] | Fully Supported | 
[github.com/DataDog/dd-trace-go/contrib/twitchtv/twirp/v2][107] | +| [Vault][63] | Fully Supported | [github.com/DataDog/dd-trace-go/contrib/hashicorp/vault/v2][108] | +| [Consul][65] | Fully Supported | [github.com/DataDog/dd-trace-go/contrib/hashicorp/consul/v2][109] | +| [Gorm v2][69] | Fully Supported | [github.com/DataDog/dd-trace-go/contrib/gorm.io/gorm.v1/v2][110] | +| [Kubernetes][71] | Fully Supported | [github.com/DataDog/dd-trace-go/contrib/k8s.io/client-go/kubernetes/v2][111] | +| [Memcache][73] | Fully Supported | [github.com/DataDog/dd-trace-go/contrib/bradfitz/gomemcache/memcache/v2][112] | + + +Packages must be imported with: + +```go +import "github.com/DataDog/dd-trace-go/contrib///v2" +``` + +[79]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/v2/contrib +[6]: https://gin-gonic.com +[80]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/gin-gonic/gin/v2 +[8]: http://www.gorillatoolkit.org/pkg/mux +[81]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/gorilla/mux/v2 +[10]: https://github.com/grpc/grpc-go +[82]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/google.golang.org/grpc/v2 +[13]: https://github.com/go-chi/chi +[83]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/go-chi/chi/v2 +[15]: https://github.com/labstack/echo +[84]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/labstack/echo.v4/v2 +[18]: https://github.com/gofiber/fiber +[85]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/gofiber/fiber.v2/v2 +[20]: https://aws.amazon.com/sdk-for-go +[86]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/aws/aws-sdk-go/aws/v2 +[75]: https://aws.github.io/aws-sdk-go-v2/docs/ +[113]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/aws/aws-sdk-go-v2/aws/v2 +[22]: https://github.com/olivere/elastic +[87]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/olivere/elastic.v5/v2 +[24]: https://github.com/gocql/gocql +[88]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/gocql/gocql/v2 +[26]: https://github.com/graph-gophers/graphql-go +[89]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/graph-gophers/graphql-go/v2 +[28]: https://golang.org/pkg/net/http +[90]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/net/http/v2 +[30]: https://github.com/julienschmidt/httprouter +[91]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/julienschmidt/httprouter/v2 +[32]: https://github.com/go-redis/redis +[92]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/go-redis/redis/v2 +[34]: https://github.com/go-redis/redis/v8 +[93]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/go-redis/redis.v8/v2 +[36]: https://github.com/garyburd/redigo +[94]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/garyburd/redigo/v2 +[38]: https://github.com/gomodule/redigo +[95]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/gomodule/redigo/v2 +[40]: https://golang.org/pkg/database/sql +[96]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/database/sql/v2 +[42]: https://github.com/jmoiron/sqlx +[97]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/jmoiron/sqlx/v2 +[44]: https://github.com/mongodb/mongo-go-driver +[98]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/go.mongodb.org/mongo-driver/mongo/v2 +[73]: https://github.com/bradfitz/gomemcache/memcache +[99]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/globalsign/mgo/v2 +[47]: https://github.com/tidwall/buntdb +[100]: 
https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/tidwall/buntdb/v2 +[49]: https://github.com/syndtr/goleveldb +[101]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/syndtr/goleveldb/leveldb/v2 +[51]: https://github.com/miekg/dns +[102]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/miekg/dns/v2 +[53]: https://github.com/confluentinc/confluent-kafka-go +[103]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/confluentinc/confluent-kafka-go/v2 +[55]: https://github.com/Shopify/sarama +[104]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/IBM/sarama/v2 +[57]: https://github.com/googleapis/google-api-go-client +[105]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/google.golang.org/api/v2 +[59]: https://github.com/emicklei/go-restful +[106]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/emicklei/go-restful.v3/v2 +[61]: https://github.com/twitchtv/twirp +[107]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/twitchtv/twirp/v2 +[63]: https://github.com/hashicorp/vault +[108]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/hashicorp/vault/v2 +[65]: https://github.com/hashicorp/consul +[109]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/hashicorp/consul/v2 +[69]: https://gorm.io/ +[110]: https://github.com/DataDog/dd-trace-go/contrib/gorm.io/gorm.v1/v2 +[71]: https://github.com/kubernetes/client-go +[111]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/k8s.io/client-go/kubernetes/v2 +[73]: https://github.com/bradfitz/gomemcache/memcache +[112]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/bradfitz/gomemcache/memcache/v2 +{{% /tab %}} +{{< /tabs >}} + +## Further Reading + +{{< partial name="whats-next/whats-next.html" >}} + +[1]: https://github.com/DataDog/dd-trace-go +[2]: https://github.com/DataDog/dd-trace-go?tab=readme-ov-file#go-support-policy +[4]: https://www.datadoghq.com/support/ [77]: /tracing/trace_collection/library_config/go/ -[78]: /tracing/trace_collection/automatic_instrumentation/dd_libraries/go/?tab=manualinstrumentation#activate-go-integrations-to-create-spans +[78]: /tracing/trace_collection/automatic_instrumentation/dd_libraries/go/?tab=manualinstrumentation#activate-go-integrations-to-create-spans \ No newline at end of file diff --git a/content/en/tracing/trace_collection/custom_instrumentation/go/dd-api.md b/content/en/tracing/trace_collection/custom_instrumentation/go/dd-api.md index c54133d1455ba..62e199e7d2ba6 100644 --- a/content/en/tracing/trace_collection/custom_instrumentation/go/dd-api.md +++ b/content/en/tracing/trace_collection/custom_instrumentation/go/dd-api.md @@ -7,7 +7,7 @@ aliases: - /tracing/setup_overview/custom_instrumentation/go - /tracing/trace_collection/custom_instrumentation/go - /tracing/trace_collection/custom_instrumentation/dd_libraries/go -description: 'Instrument your code with the Datadog API tracer.' +description: 'Instrument your code with the Datadog Go APM tracer.' code_lang: dd-api type: multi-code-lang code_lang_weight: 1 @@ -19,10 +19,15 @@ further_reading: tag: 'Documentation' text: 'Explore your services, resources, and traces' --- +
If you have not yet read the instructions for auto-instrumentation and setup, start with the Go Setup Instructions.
+
+The Go Tracer v2 is in Preview! See the migration guide to upgrade. +
+ This page details common use cases for adding and customizing observability with Datadog APM. ## Adding tags @@ -40,7 +45,8 @@ import ( "log" "net/http" - "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" // 1.x + // "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" // 2.x ) func handler(w http.ResponseWriter, r *http.Request) { @@ -70,7 +76,8 @@ package main import ( "net/http" - "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" // 1.x + // "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" // 2.x ) func handler(w http.ResponseWriter, r *http.Request) { @@ -89,7 +96,10 @@ Add [tags][1] to all [spans][2] by configuring the tracer with the `WithGlobalTa ```go package main -import "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" +import ( + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" // 1.x + // "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" // 2.x +) func main() { tracer.Start( @@ -109,9 +119,6 @@ err := someOperation() span.Finish(tracer.WithError(err)) ``` -**Note**: Closing a span that was not started in your code can lead to missing data. -Please follow your specific `dd-trace-go` [integration documentation][10] to do that. - ## Adding spans If you aren't using supported library instrumentation (see [Library compatibility][3]), you may want to to manually instrument your code. @@ -122,9 +129,9 @@ Unlike other Datadog tracing libraries, when tracing Go applications, it's recom ### Manually creating a new span -To make use of manual instrumentation, use the `tracer` package which is documented on Datadog's [godoc page][4]: +To make use of manual instrumentation, use the `tracer` package which is documented on Datadog's [godoc page][4] (or [the v2 godoc page][12]): -There are two functions available to create spans. API details are available for `StartSpan` [here][5] and for `StartSpanFromContext` [here][6]. +There are two functions available to create spans. API details are available for `StartSpan` [here][5] (or [here for v2][13]) and for `StartSpanFromContext` [here][6] (or [here for v2][14]). ```go //Create a span with a resource name, which is the child of parentSpan. 
@@ -160,7 +167,8 @@ package main import ( "net/http" - "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" // 1.x + // "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" // 2.x ) func handler(w http.ResponseWriter, r *http.Request) { @@ -186,7 +194,8 @@ package main import ( "net/http" - "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" // 1.x + // "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" // 2.x ) func handler(w http.ResponseWriter, r *http.Request) { @@ -226,5 +235,7 @@ Traces can be excluded based on their resource name, to remove synthetic traffic [6]: https://pkg.go.dev/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer#StartSpanFromContext [7]: /tracing/glossary/#trace [9]: /tracing/security -[10]: https://pkg.go.dev/gopkg.in/DataDog/dd-trace-go.v1 [11]: /tracing/trace_collection/trace_context_propagation/ +[12]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer +[13]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer#StartSpan +[14]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer#StartSpanFromContext \ No newline at end of file diff --git a/content/en/tracing/trace_collection/custom_instrumentation/go/migration.md b/content/en/tracing/trace_collection/custom_instrumentation/go/migration.md new file mode 100644 index 0000000000000..f74821e628d5e --- /dev/null +++ b/content/en/tracing/trace_collection/custom_instrumentation/go/migration.md @@ -0,0 +1,50 @@ +--- +title: Migrating from v1 to v2 of the Go Tracer +description: 'Upgrade your Go tracer from v1 to v2.' +aliases: + - /tracing/trace_collection/custom_instrumentation/opentracing/go +further_reading: + - link: 'tracing/trace_collection/custom_instrumentation/go/dd-api' + tag: 'Documentation' + text: 'Get started with v1 of the Go Tracer' +--- + +
This documentation assumes that you are using version v1.x of the Go tracer. If you are already using v2.x, see Go Custom Instrumentation using the Datadog API instead.
+ +This guide explains how to migrate from Go Tracer v1.x to v2. See [Go tracer support][2]. + +Version 2 of the Go tracer introduces significant API improvements: + +- Removes interfaces to enable future flexibility +- Isolates integrations to prevent false positives from security scanners +- Enforces library patterns to prevent misuse + +To simplify the migration process, Datadog provides a migration tool that handles essential code updates automatically. + +## Migration tool features + +The migration tool automatically updates your tracing code when upgrading from `dd-trace-go` v1.x to v2.0. It makes the following changes: + +* Updates import URLs from `dd-trace-go.v1` to `dd-trace-go/v2`. +* Moves imports and usage of certain types from `ddtrace/tracer` to `ddtrace`. +* Converts `Span` and `SpanContext` calls to use pointers. +* Replaces unsupported `WithServiceName()` calls with `WithService()`. +* Updates `TraceID()` calls to `TraceIDLower()` for obtaining `uint64` trace IDs. + +## Using the migration tool + +Run these commands to use the migration tool: + +```shell +go get github.com/DataDog/dd-trace-go/v2/tools/v2check +go run github.com/DataDog/dd-trace-go/v2/tools/v2check/main.go +``` + +For more information about the migration, see the [godoc page for dd-trace-go v2][1]. + +## Further Reading + +{{< partial name="whats-next/whats-next.html" >}} + +[1]: https://godoc.org/github.com/DataDog/dd-trace-go/v2/ +[2]: /tracing/trace_collection/compatibility/go/?tab=v1#go-tracer-support \ No newline at end of file diff --git a/content/en/tracing/trace_collection/custom_instrumentation/go/otel.md b/content/en/tracing/trace_collection/custom_instrumentation/go/otel.md index c1cbbbf65f48c..09bb3a964b4e7 100644 --- a/content/en/tracing/trace_collection/custom_instrumentation/go/otel.md +++ b/content/en/tracing/trace_collection/custom_instrumentation/go/otel.md @@ -28,9 +28,12 @@ import ( "log" "os" - "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext" - ddotel "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/opentelemetry" - ddtracer "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext" // 1.x + ddotel "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/opentelemetry" // 1.x + ddtracer "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" // 1.x + // "github.com/DataDog/dd-trace-go/v2/ddtrace/ext" // 2.x + // ddotel "github.com/DataDog/dd-trace-go/v2/ddtrace/opentelemetry" // 2.x + // ddtracer "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" // 2.x "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" @@ -49,18 +52,25 @@ To configure OpenTelemetry to use the Datadog trace provider: go get go.opentelemetry.io/otel ``` -3. Install the Datadog OpenTelemetry wrapper package `gopkg.in/DataDog/dd-trace-go.v1/ddtrace/opentelemetry` using the command: +3. Install the Datadog OpenTelemetry wrapper package using the command: ```shell go get gopkg.in/DataDog/dd-trace-go.v1/ddtrace/opentelemetry ``` + If you are using v2, run: + + ```shell + go get github.com/DataDog/dd-trace-go/v2/ddtrace/opentelemetry + ``` + 4.
Import packages in the code: ```go import ( - "go.opentelemetry.io/otel" - ddotel "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/opentelemetry" + "go.opentelemetry.io/otel" + ddotel "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/opentelemetry" // 1.x + // "github.com/DataDog/dd-trace-go/v2/ddtrace/opentelemetry" // 2.x ) ``` diff --git a/content/en/tracing/trace_collection/custom_instrumentation/opentracing/_index.md b/content/en/tracing/trace_collection/custom_instrumentation/opentracing/_index.md index bee84b5a3aee7..cee7ae84b6d44 100644 --- a/content/en/tracing/trace_collection/custom_instrumentation/opentracing/_index.md +++ b/content/en/tracing/trace_collection/custom_instrumentation/opentracing/_index.md @@ -16,7 +16,6 @@ Read more for your language: {{< nextlink href="/tracing/trace_collection/custom_instrumentation/opentracing/java" >}}Java{{< /nextlink >}} {{< nextlink href="/tracing/trace_collection/custom_instrumentation/opentracing/python" >}}Python{{< /nextlink >}} {{< nextlink href="/tracing/trace_collection/custom_instrumentation/opentracing/ruby" >}}Ruby{{< /nextlink >}} - {{< nextlink href="/tracing/trace_collection/custom_instrumentation/opentracing/go" >}}Go{{< /nextlink >}} {{< nextlink href="/tracing/trace_collection/custom_instrumentation/opentracing/nodejs" >}}Node.js{{< /nextlink >}} {{< nextlink href="/tracing/trace_collection/custom_instrumentation/opentracing/php" >}}PHP{{< /nextlink >}} {{< nextlink href="/tracing/trace_collection/custom_instrumentation/opentracing/dotnet" >}}.NET{{< /nextlink >}} diff --git a/content/en/tracing/trace_collection/custom_instrumentation/opentracing/go.md b/content/en/tracing/trace_collection/custom_instrumentation/opentracing/go.md deleted file mode 100644 index 50ff96b5fb548..0000000000000 --- a/content/en/tracing/trace_collection/custom_instrumentation/opentracing/go.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -title: Go OpenTracing Instrumentation -aliases: -- /tracing/setup_overview/open_standards/go -- /tracing/trace_collection/open_standards/go -- /tracing/trace_collection/opentracing/go/ -description: 'OpenTracing Instrumentation for Go' -code_lang: go -type: multi-code-lang -code_lang_weight: 30 ---- - - -Datadog supports the OpenTracing standard. For more details and information, view the [OpenTracing API][1], or see the setup information below. - -## Setup - -Import the [`opentracer` package][2] to expose the Datadog tracer as an [OpenTracing][3] compatible tracer. - -A basic usage example: - -```go -package main - -import ( - "github.com/opentracing/opentracing-go" - - "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/opentracer" - "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" -) - -func main() { - // Start the regular tracer and return it as an opentracing.Tracer interface. You - // may use the same set of options as you normally would with the Datadog tracer. - t := opentracer.New(tracer.WithServiceName("")) - - // Stop it using the regular Stop call for the tracer package. - defer tracer.Stop() - - // Set the global OpenTracing tracer. - opentracing.SetGlobalTracer(t) - - // Use the OpenTracing API as usual. 
-} -``` - -[1]: https://github.com/opentracing/opentracing-go -[2]: https://pkg.go.dev/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/opentracer -[3]: http://opentracing.io diff --git a/content/en/tracing/trace_collection/library_config/go.md b/content/en/tracing/trace_collection/library_config/go.md index da9639e65d0bb..ab99b1fb1368e 100644 --- a/content/en/tracing/trace_collection/library_config/go.md +++ b/content/en/tracing/trace_collection/library_config/go.md @@ -10,6 +10,9 @@ further_reading: - link: "https://pkg.go.dev/gopkg.in/DataDog/dd-trace-go.v1/ddtrace" tag: "External Site" text: "Package page" +- link: "https://pkg.go.dev/github.com/DataDog/dd-trace-go/v2/ddtrace" + tag: "External Site" + text: "v2 Package page" - link: "/tracing/glossary/" tag: "Documentation" text: "Explore your services, resources and traces" @@ -33,7 +36,8 @@ You may also elect to provide `env`, `service`, and `version` through the tracer package main import ( - "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" // 1.x + // "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" // 2.x ) func main() { @@ -59,7 +63,7 @@ func main() { ``` The Go tracer supports additional environment variables and functions for configuration. -See all available options in the [configuration documentation][3]. +See all available options in the [configuration documentation][3] (or [configuration documentation v2][20]). ### Unified service tagging @@ -204,3 +208,4 @@ The [APM environment name][7] may be configured [in the Agent][8] or using the [ [17]: https://docs.datadoghq.com/tracing/metrics/runtime_metrics/go [18]: https://docs.datadoghq.com/tracing/trace_collection/trace_context_propagation/ [19]: /opentelemetry/interoperability/environment_variable_support +[20]: https://pkg.go.dev/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer#StartOption diff --git a/content/en/tracing/trace_pipeline/ingestion_mechanisms.md b/content/en/tracing/trace_pipeline/ingestion_mechanisms.md index 0239db243e787..acdb3184aaff0 100644 --- a/content/en/tracing/trace_pipeline/ingestion_mechanisms.md +++ b/content/en/tracing/trace_pipeline/ingestion_mechanisms.md @@ -458,8 +458,10 @@ package main import ( "log" "net/http" - "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext" - "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext" // 1.x + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" // 1.x + // "github.com/DataDog/dd-trace-go/v2/ddtrace/ext" // 2.x + // "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" // 2.x ) func handler(w http.ResponseWriter, r *http.Request) { @@ -483,8 +485,10 @@ import ( "log" "net/http" - "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext" - "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext" // 1.x + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" // 1.x + // "github.com/DataDog/dd-trace-go/v2/ddtrace/ext" // 2.x + // "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" // 2.x ) func handler(w http.ResponseWriter, r *http.Request) { diff --git a/content/en/tracing/troubleshooting/tracer_debug_logs.md b/content/en/tracing/troubleshooting/tracer_debug_logs.md index 51385adcd8364..33ddcdf4d3831 100644 --- a/content/en/tracing/troubleshooting/tracer_debug_logs.md +++ b/content/en/tracing/troubleshooting/tracer_debug_logs.md @@ -6,29 +6,7 @@ further_reading: text: "Troubleshooting APM Connection Errors" --- -## Automated debug log collection - -
Automated debug logs are supported for Java, .NET, Node.js, and Python. For other languages, use manual debug log collection instead.
- -A flare allows you to send necessary troubleshooting information to the Datadog support team, including tracer logs, with sensitive data removed. Flares are useful for troubleshooting issues like high CPU usage, high memory usage, and missing spans. - -### Prerequisites - -- To send a flare from the Datadog site, make sure you've enabled [Fleet Automation][4] and [Remote Configuration][5] on the Agent. -- You must have a supported tracer version: - - Java: `1.26.0` or greater - - Python: `2.11.0` or greater - - Node.js: `5.15.0` or greater, or `4.39.0` or greater - - .NET: `2.48.0` or greater - -### Send a flare - -{{% remote-flare %}} -For example: - -{{< img src="agent/fleet_automation/fleet-automation-flare-agent-and-tracer-debuglevel.png" alt="The Send Ticket button launches a form to send a flare for an existing or new support ticket" style="width:60%;" >}} - -## Manual debug log collection +## Enable debug mode Use Datadog debug settings to diagnose issues or audit trace data. Datadog does not recommend that you enable debug mode in production systems because it increases the number of events that are sent to your loggers. Use debug mode for debugging purposes only. @@ -142,7 +120,10 @@ or enable the debug mode during the `Start` config: ```go package main -import "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" +import ( + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" // 1.x + // "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" // 2.x +) func main() { tracer.Start(tracer.WithDebugMode(true)) @@ -162,7 +143,8 @@ package main import ( "time" - "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" // 1.x + // "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" // 2.x ) func main() { @@ -505,6 +487,3 @@ Available starting in 0.98.0: [1]: /help/ [2]: /agent/troubleshooting/#send-a-flare -[3]: /agent/remote_config -[4]: /agent/fleet_automation/ -[5]: /agent/remote_config#enabling-remote-configuration diff --git a/content/en/watchdog/alerts/_index.md b/content/en/watchdog/alerts/_index.md index 934e2d9b64c4b..9ebc375f26d35 100644 --- a/content/en/watchdog/alerts/_index.md +++ b/content/en/watchdog/alerts/_index.md @@ -133,6 +133,10 @@ Watchdog looks at infrastructure metrics from the following integrations: * [System][1], for host-level memory usage (memory leaks) and TCP retransmit rate. 
* [Redis][2] * [PostgreSQL][3] + * [MySQL][15] + * [SQLServer][16] + * [Cassandra][17] + * [Oracle Database][18] * [NGINX][4] * [Docker][13] * [Kubernetes][14] @@ -164,6 +168,10 @@ Watchdog starts finding anomalies after the minimum required history is availabl [12]: /serverless/ [13]: /containers/docker/?tab=standard [14]: /containers/kubernetes/installation/?tab=operator +[15]: /integrations/mysql/ +[16]: /integrations/sqlserver/ +[17]: /integrations/cassandra/ +[18]: /integrations/oracle/ {{% /tab %}} {{< /tabs >}} diff --git a/content/en/watchdog/faulty_cloud_saas_api_detection.md b/content/en/watchdog/faulty_cloud_saas_api_detection.md new file mode 100644 index 0000000000000..c6434da61bc29 --- /dev/null +++ b/content/en/watchdog/faulty_cloud_saas_api_detection.md @@ -0,0 +1,28 @@ +--- +title: Automatic Faulty Cloud & SaaS API Detection +further_reading: +# - link: "https://www.datadoghq.com/blog//" +# tag: "Blog" +# text: "Stay ahead of service disruptions with Watchdog Cloud and API Outage Detection" +- link: "watchdog/faulty_deployment_detection/" + tag: "Documentation" + text: "Learn about Watchdog Faulty Deployment Detection" +--- + +## Overview + +Automatic Faulty Cloud & SaaS API Detection identifies issues with third-party providers (payment gateways, cloud providers, and so on) within minutes, reducing mean time to detection (MTTD). Watchdog uses APM telemetry to continuously monitor for elevated error rates in requests to external providers, such as AWS, OpenAI, Slack, and Stripe, to detect service degradation as soon as it occurs. This proactive detection gives you a head start in identifying and mitigating issues before they escalate, significantly reducing time spent on root cause analysis and improving response times. + +When Watchdog identifies that an external provider you are using is faulty, it flags the services impacted by the problem and the extent of the disruption. This allows you to differentiate between external and internal issues. Datadog also provides direct links to the provider's status page and support channels, so you can reach out to them as needed. + +{{< img src="watchdog/external_provider_outage.png" alt="Faulty SaaS API vendor detection" >}} + +Whenever a faulty cloud or SaaS API is detected, Watchdog creates an event in the [Event Explorer][1]. You can set up a monitor to be notified automatically about these events: + +1. Go to the [New Monitor][2] page. +2. Choose **Watchdog**. +3. Select `Third Party` as the alert category. + + +[1]: https://app.datadoghq.com/event/explorer +[2]: https://app.datadoghq.com/monitors/create diff --git a/content/es/monitors/types/anomaly.md b/content/es/monitors/types/anomaly.md index 41a260bd6cb99..de9b29298e363 100644 --- a/content/es/monitors/types/anomaly.md +++ b/content/es/monitors/types/anomaly.md @@ -136,17 +136,17 @@ En este ejemplo, `basic` identifica con éxito los picos de anomalías fuera del #### Comparación de la detección de anomalías para la temporalidad semanal En este ejemplo, la métrica muestra un cambio de nivel repentino. `Agile` se ajusta más rápidamente al cambio de nivel que `robust`. Además, la amplitud de los límites de `robust` aumenta para reflejar una mayor incertidumbre tras el cambio de nivel; la amplitud de los límites de `agile` permanece invariable. `Basic` no se ajusta bien a este escenario, en el que la métrica muestra un fuerte patrón temporal semanal.
-{{< img src="monitors/monitor_types/Anomalía/alg_comparison_2.png" alt="comparación del algoritmo de detección de anomalías con la temporalidad semanal" style="width:90%;">}} +{{< img src="monitors/monitor_types/anomaly/alg_comparison_2.png" alt="comparación del algoritmo de detección de anomalías con la temporalidad semanal" style="width:90%;">}} #### Comparación de las reacciones de los algoritmos al cambio Este ejemplo muestra cómo reaccionan los algoritmos ante una anomalía de una hora de duración. `Robust` no ajusta los límites de la anomalía en este escenario, ya que reacciona más lentamente ante los cambios bruscos. Los demás algoritmos empiezan a comportarse como si la anomalía fuera la nueva normalidad. `Agile` identifica incluso el regreso de la métrica a su nivel original como una anomalía. -{{< img src="monitors/monitor_types/Anomalía/alg_comparison_3.png" alt="comparación del algoritmo de detección de anomalías con la temporalidad horaria" style="width:90%;">}} +{{< img src="monitors/monitor_types/anomaly/alg_comparison_3.png" alt="comparación del algoritmo de detección de anomalías con la temporalidad horaria" style="width:90%;">}} #### Comparación de las reacciones de los algoritmos a las escalas Los algoritmos tratan a las escalas de forma diferente. `Basic` y `robust` son insensibles a las escalas, mientras que `agile` no lo es. Los gráficos abajo a la izquierda muestran que `agile` y `robust` señalan el cambio de nivel como anómalo. A la derecha, se añade 1000 a la misma métrica y `agile` deja de señalar el cambio de nivel como anómalo, mientras que `robust` sigue haciéndolo. -{{< img src="monitors/monitor_types/Anomalía/alg_comparison_scale.png" alt="comparación del algoritmo con las escalas" style="width:90%;">}} +{{< img src="monitors/monitor_types/anomaly/alg_comparison_scale.png" alt="comparación del algoritmo con las escalas" style="width:90%;">}} #### Comparación de la detección de anomalías para nuevas métricas Este ejemplo muestra cómo cada algoritmo trata una métrica nueva. `Robust` y `agile` no muestran ningún límite durante las primeras temporalidades (semanales). `Basic` empieza a mostrar límites poco después de que aparece la métrica por primera vez. @@ -266,4 +266,4 @@ La configuración estándar de umbrales y de ventanas de umbrales tiene el sigui [14]: /es/api/v1/monitors/ [15]: /es/monitors/guide/anomaly-monitor/ [16]: /es/monitors/guide/how-to-update-anomaly-monitor-timezone/ -[17]: /es/help/ \ No newline at end of file +[17]: /es/help/ diff --git a/content/ja/logs/guide/azure-logging-guide.md b/content/ja/logs/guide/azure-logging-guide.md index 8f94628d54a51..890944beb6880 100644 --- a/content/ja/logs/guide/azure-logging-guide.md +++ b/content/ja/logs/guide/azure-logging-guide.md @@ -147,7 +147,7 @@ Azure プラットフォームのログ (リソースログを含む) を送信 3. ポリシー名を入力し、**Listen** を選択します。 4. **Connection string-primary key** の値をコピーし、安全な場所に保管してください。これは Datadog-Azure 関数が Event Hub と通信するために必要です。 -{{< img src="integration/azure/eventhub_connection_string.png" alt="イベントハブの共有アクセスポリシーの接続文字列プライマリキー値" popup="true" style="width:100%">}} +{{< img src="integrations/azure/eventhub_connection_string.png" alt="イベントハブの共有アクセスポリシーの接続文字列プライマリキー値" popup="true" style="width:100%">}} #### Datadog-Azure 関数を作成 diff --git a/data/api/v1/full_spec.yaml b/data/api/v1/full_spec.yaml index 3ada23b23147b..57d064e03abc3 100644 --- a/data/api/v1/full_spec.yaml +++ b/data/api/v1/full_spec.yaml @@ -6064,7 +6064,7 @@ components: - name type: object LogsPipelineList: - description: Array of pipeline ID strings. 
+ description: Array of all log pipeline objects configured for the organization. items: $ref: '#/components/schemas/LogsPipeline' type: array @@ -20506,7 +20506,7 @@ components: type: integer profiling_host_top99p: description: Shows the 99th percentile of all profiled hosts over all hours - in the current date for all organizations. + within the current date for all organizations. format: int64 type: integer rum_browser_and_mobile_session_count: @@ -20595,10 +20595,25 @@ components: type: integer rum_mobile_lite_session_count_roku_sum: description: Shows the sum of all mobile RUM lite sessions on Roku over - all hours in the current date for all organizations (To be introduced + all hours within the current date for all organizations (To be introduced on October 1st, 2024). format: int64 type: integer + rum_mobile_replay_session_count_android_sum: + description: Shows the sum of all mobile RUM replay sessions on Android + over all hours within the current date for the given org. + format: int64 + type: integer + rum_mobile_replay_session_count_ios_sum: + description: Shows the sum of all mobile RUM replay sessions on iOS over + all hours within the current date for the given org. + format: int64 + type: integer + rum_mobile_replay_session_count_reactnative_sum: + description: Shows the sum of all mobile RUM replay sessions on React Native + over all hours within the current date for the given org. + format: int64 + type: integer rum_replay_session_count_sum: description: Shows the sum of all RUM Session Replay counts over all hours in the current date for all organizations (To be introduced on October @@ -21359,7 +21374,7 @@ components: type: integer profiling_host_top99p: description: Shows the 99th percentile of all profiled hosts over all hours - in the current date for the given org. + within the current date for the given org. format: int64 type: integer public_id: @@ -21458,6 +21473,21 @@ components: 1st, 2024). format: int64 type: integer + rum_mobile_replay_session_count_android_sum: + description: Shows the sum of all mobile RUM replay sessions on Android + over all hours within the current date for the given org. + format: int64 + type: integer + rum_mobile_replay_session_count_ios_sum: + description: Shows the sum of all mobile RUM replay sessions on iOS over + all hours within the current date for the given org. + format: int64 + type: integer + rum_mobile_replay_session_count_reactnative_sum: + description: Shows the sum of all mobile RUM replay sessions on React Native + over all hours within the current date for the given org. + format: int64 + type: integer rum_replay_session_count_sum: description: Shows the sum of all RUM Session Replay counts over all hours in the current date for the given org (To be introduced on October 1st, @@ -22346,10 +22376,25 @@ components: type: integer rum_mobile_lite_session_count_roku_agg_sum: description: Shows the sum of all mobile RUM lite sessions on Roku over - all hours in the current month for all organizations (To be introduced + all hours within the current month for all organizations (To be introduced on October 1st, 2024). format: int64 type: integer + rum_mobile_replay_session_count_android_agg_sum: + description: Shows the sum of all mobile RUM replay sessions on Android + over all hours within the current month for all organizations. 
+ format: int64 + type: integer + rum_mobile_replay_session_count_ios_agg_sum: + description: Shows the sum of all mobile RUM replay sessions on iOS over + all hours within the current month for all organizations. + format: int64 + type: integer + rum_mobile_replay_session_count_reactnative_agg_sum: + description: Shows the sum of all mobile RUM replay sessions on React Native + over all hours within the current month for all organizations. + format: int64 + type: integer rum_replay_session_count_agg_sum: description: Shows the sum of all RUM Session Replay counts over all hours in the current month for all organizations (To be introduced on October @@ -29985,7 +30030,7 @@ paths: type: idempotent /api/v1/monitor: get: - description: Get details about the specified monitor from your organization. + description: Get all monitors from your organization. operationId: ListMonitors parameters: - description: 'When specified, shows additional information about the group @@ -30068,6 +30113,7 @@ paths: content: application/json: schema: + description: An array of monitor objects. items: $ref: '#/components/schemas/Monitor' type: array @@ -30091,7 +30137,7 @@ paths: appKeyAuth: [] - AuthZ: - monitors_read - summary: Get all monitor details + summary: Get all monitors tags: - Monitors x-menu-order: 2 @@ -38762,10 +38808,17 @@ tags: for more information. name: Embeddable Graphs - description: 'The Event Management API allows you to programmatically post events - to the Events Explorer + to the Events Explorer and fetch events from the Events Explorer. See the [Event + Management page](https://docs.datadoghq.com/service_management/events/) for more + information. - and fetch events from the Events Explorer. See the [Event Management page](https://docs.datadoghq.com/service_management/events/) - for more information.' + + **Update to Datadog monitor events `aggregation_key` starting March 1, 2025:** + The Datadog monitor events `aggregation_key` is unique to each Monitor ID. Starting + March 1st, this key will also include Monitor Group, making it unique per *Monitor + ID and Monitor Group*. If you''re using monitor events `aggregation_key` in dashboard + queries or the Event API, you must migrate to use `@monitor.id`. Reach out to + [support](https://www.datadoghq.com/support/) if you have any question.' 
name: Events - description: 'Configure your Datadog-Google Cloud Platform (GCP) integration directly diff --git a/data/api/v1/full_spec_deref.json b/data/api/v1/full_spec_deref.json index 3f7845b225762..eb65b8b789c63 100644 --- a/data/api/v1/full_spec_deref.json +++ b/data/api/v1/full_spec_deref.json @@ -144186,7 +144186,7 @@ "type": "object" }, "LogsPipelineList": { - "description": "Array of pipeline ID strings.", + "description": "Array of all log pipeline objects configured for the organization.", "items": { "description": "Pipelines and processors operate on incoming logs,\nparsing and transforming them into structured attributes for easier querying.\n\n**Note**: These endpoints are only available for admin users.\nMake sure to use an application key created by an admin.", "properties": { @@ -553144,7 +553144,7 @@ "type": "integer" }, "profiling_host_top99p": { - "description": "Shows the 99th percentile of all profiled hosts over all hours in the current date for the given org.", + "description": "Shows the 99th percentile of all profiled hosts over all hours within the current date for the given org.", "format": "int64", "type": "integer" }, @@ -553231,6 +553231,21 @@ "format": "int64", "type": "integer" }, + "rum_mobile_replay_session_count_android_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on Android over all hours within the current date for the given org.", + "format": "int64", + "type": "integer" + }, + "rum_mobile_replay_session_count_ios_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on iOS over all hours within the current date for the given org.", + "format": "int64", + "type": "integer" + }, + "rum_mobile_replay_session_count_reactnative_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on React Native over all hours within the current date for the given org.", + "format": "int64", + "type": "integer" + }, "rum_replay_session_count_sum": { "description": "Shows the sum of all RUM Session Replay counts over all hours in the current date for the given org (To be introduced on October 1st, 2024).", "format": "int64", @@ -553369,7 +553384,7 @@ "type": "integer" }, "profiling_host_top99p": { - "description": "Shows the 99th percentile of all profiled hosts over all hours in the current date for all organizations.", + "description": "Shows the 99th percentile of all profiled hosts over all hours within the current date for all organizations.", "format": "int64", "type": "integer" }, @@ -553444,7 +553459,22 @@ "type": "integer" }, "rum_mobile_lite_session_count_roku_sum": { - "description": "Shows the sum of all mobile RUM lite sessions on Roku over all hours in the current date for all organizations (To be introduced on October 1st, 2024).", + "description": "Shows the sum of all mobile RUM lite sessions on Roku over all hours within the current date for all organizations (To be introduced on October 1st, 2024).", + "format": "int64", + "type": "integer" + }, + "rum_mobile_replay_session_count_android_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on Android over all hours within the current date for the given org.", + "format": "int64", + "type": "integer" + }, + "rum_mobile_replay_session_count_ios_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on iOS over all hours within the current date for the given org.", + "format": "int64", + "type": "integer" + }, + "rum_mobile_replay_session_count_reactnative_sum": { + "description": "Shows the sum of 
all mobile RUM replay sessions on React Native over all hours within the current date for the given org.", "format": "int64", "type": "integer" }, @@ -554186,7 +554216,7 @@ "type": "integer" }, "profiling_host_top99p": { - "description": "Shows the 99th percentile of all profiled hosts over all hours in the current date for the given org.", + "description": "Shows the 99th percentile of all profiled hosts over all hours within the current date for the given org.", "format": "int64", "type": "integer" }, @@ -554273,6 +554303,21 @@ "format": "int64", "type": "integer" }, + "rum_mobile_replay_session_count_android_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on Android over all hours within the current date for the given org.", + "format": "int64", + "type": "integer" + }, + "rum_mobile_replay_session_count_ios_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on iOS over all hours within the current date for the given org.", + "format": "int64", + "type": "integer" + }, + "rum_mobile_replay_session_count_reactnative_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on React Native over all hours within the current date for the given org.", + "format": "int64", + "type": "integer" + }, "rum_replay_session_count_sum": { "description": "Shows the sum of all RUM Session Replay counts over all hours in the current date for the given org (To be introduced on October 1st, 2024).", "format": "int64", @@ -555232,7 +555277,22 @@ "type": "integer" }, "rum_mobile_lite_session_count_roku_agg_sum": { - "description": "Shows the sum of all mobile RUM lite sessions on Roku over all hours in the current month for all organizations (To be introduced on October 1st, 2024).", + "description": "Shows the sum of all mobile RUM lite sessions on Roku over all hours within the current month for all organizations (To be introduced on October 1st, 2024).", + "format": "int64", + "type": "integer" + }, + "rum_mobile_replay_session_count_android_agg_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on Android over all hours within the current month for all organizations.", + "format": "int64", + "type": "integer" + }, + "rum_mobile_replay_session_count_ios_agg_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on iOS over all hours within the current month for all organizations.", + "format": "int64", + "type": "integer" + }, + "rum_mobile_replay_session_count_reactnative_agg_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on React Native over all hours within the current month for all organizations.", "format": "int64", "type": "integer" }, @@ -556545,7 +556605,7 @@ "type": "integer" }, "profiling_host_top99p": { - "description": "Shows the 99th percentile of all profiled hosts over all hours in the current date for the given org.", + "description": "Shows the 99th percentile of all profiled hosts over all hours within the current date for the given org.", "format": "int64", "type": "integer" }, @@ -556632,6 +556692,21 @@ "format": "int64", "type": "integer" }, + "rum_mobile_replay_session_count_android_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on Android over all hours within the current date for the given org.", + "format": "int64", + "type": "integer" + }, + "rum_mobile_replay_session_count_ios_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on iOS over all hours within the current date for the given org.", + 
"format": "int64", + "type": "integer" + }, + "rum_mobile_replay_session_count_reactnative_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on React Native over all hours within the current date for the given org.", + "format": "int64", + "type": "integer" + }, "rum_replay_session_count_sum": { "description": "Shows the sum of all RUM Session Replay counts over all hours in the current date for the given org (To be introduced on October 1st, 2024).", "format": "int64", @@ -556770,7 +556845,7 @@ "type": "integer" }, "profiling_host_top99p": { - "description": "Shows the 99th percentile of all profiled hosts over all hours in the current date for all organizations.", + "description": "Shows the 99th percentile of all profiled hosts over all hours within the current date for all organizations.", "format": "int64", "type": "integer" }, @@ -556845,7 +556920,22 @@ "type": "integer" }, "rum_mobile_lite_session_count_roku_sum": { - "description": "Shows the sum of all mobile RUM lite sessions on Roku over all hours in the current date for all organizations (To be introduced on October 1st, 2024).", + "description": "Shows the sum of all mobile RUM lite sessions on Roku over all hours within the current date for all organizations (To be introduced on October 1st, 2024).", + "format": "int64", + "type": "integer" + }, + "rum_mobile_replay_session_count_android_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on Android over all hours within the current date for the given org.", + "format": "int64", + "type": "integer" + }, + "rum_mobile_replay_session_count_ios_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on iOS over all hours within the current date for the given org.", + "format": "int64", + "type": "integer" + }, + "rum_mobile_replay_session_count_reactnative_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on React Native over all hours within the current date for the given org.", "format": "int64", "type": "integer" }, @@ -964988,7 +965078,7 @@ "content": { "application/json": { "schema": { - "description": "Array of pipeline ID strings.", + "description": "Array of all log pipeline objects configured for the organization.", "items": { "description": "Pipelines and processors operate on incoming logs,\nparsing and transforming them into structured attributes for easier querying.\n\n**Note**: These endpoints are only available for admin users.\nMake sure to use an application key created by an admin.", "properties": { @@ -971542,7 +971632,7 @@ }, "/api/v1/monitor": { "get": { - "description": "Get details about the specified monitor from your organization.", + "description": "Get all monitors from your organization.", "operationId": "ListMonitors", "parameters": [ { @@ -971633,6 +971723,7 @@ "content": { "application/json": { "schema": { + "description": "An array of monitor objects.", "items": { "description": "Object describing a monitor.", "properties": { @@ -972586,7 +972677,7 @@ ] } ], - "summary": "Get all monitor details", + "summary": "Get all monitors", "tags": [ "Monitors" ], @@ -1132548,7 +1132639,22 @@ "type": "integer" }, "rum_mobile_lite_session_count_roku_agg_sum": { - "description": "Shows the sum of all mobile RUM lite sessions on Roku over all hours in the current month for all organizations (To be introduced on October 1st, 2024).", + "description": "Shows the sum of all mobile RUM lite sessions on Roku over all hours within the current month for all organizations (To be 
introduced on October 1st, 2024).", + "format": "int64", + "type": "integer" + }, + "rum_mobile_replay_session_count_android_agg_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on Android over all hours within the current month for all organizations.", + "format": "int64", + "type": "integer" + }, + "rum_mobile_replay_session_count_ios_agg_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on iOS over all hours within the current month for all organizations.", + "format": "int64", + "type": "integer" + }, + "rum_mobile_replay_session_count_reactnative_agg_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on React Native over all hours within the current month for all organizations.", "format": "int64", "type": "integer" }, @@ -1133861,7 +1133967,7 @@ "type": "integer" }, "profiling_host_top99p": { - "description": "Shows the 99th percentile of all profiled hosts over all hours in the current date for the given org.", + "description": "Shows the 99th percentile of all profiled hosts over all hours within the current date for the given org.", "format": "int64", "type": "integer" }, @@ -1133948,6 +1134054,21 @@ "format": "int64", "type": "integer" }, + "rum_mobile_replay_session_count_android_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on Android over all hours within the current date for the given org.", + "format": "int64", + "type": "integer" + }, + "rum_mobile_replay_session_count_ios_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on iOS over all hours within the current date for the given org.", + "format": "int64", + "type": "integer" + }, + "rum_mobile_replay_session_count_reactnative_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on React Native over all hours within the current date for the given org.", + "format": "int64", + "type": "integer" + }, "rum_replay_session_count_sum": { "description": "Shows the sum of all RUM Session Replay counts over all hours in the current date for the given org (To be introduced on October 1st, 2024).", "format": "int64", @@ -1134086,7 +1134207,7 @@ "type": "integer" }, "profiling_host_top99p": { - "description": "Shows the 99th percentile of all profiled hosts over all hours in the current date for all organizations.", + "description": "Shows the 99th percentile of all profiled hosts over all hours within the current date for all organizations.", "format": "int64", "type": "integer" }, @@ -1134161,7 +1134282,22 @@ "type": "integer" }, "rum_mobile_lite_session_count_roku_sum": { - "description": "Shows the sum of all mobile RUM lite sessions on Roku over all hours in the current date for all organizations (To be introduced on October 1st, 2024).", + "description": "Shows the sum of all mobile RUM lite sessions on Roku over all hours within the current date for all organizations (To be introduced on October 1st, 2024).", + "format": "int64", + "type": "integer" + }, + "rum_mobile_replay_session_count_android_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on Android over all hours within the current date for the given org.", + "format": "int64", + "type": "integer" + }, + "rum_mobile_replay_session_count_ios_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on iOS over all hours within the current date for the given org.", + "format": "int64", + "type": "integer" + }, + "rum_mobile_replay_session_count_reactnative_sum": { + "description": "Shows 
the sum of all mobile RUM replay sessions on React Native over all hours within the current date for the given org.", "format": "int64", "type": "integer" }, @@ -1138024,7 +1138160,7 @@ "name": "Embeddable Graphs" }, { - "description": "The Event Management API allows you to programmatically post events to the Events Explorer\nand fetch events from the Events Explorer. See the [Event Management page](https://docs.datadoghq.com/service_management/events/) for more information.", + "description": "The Event Management API allows you to programmatically post events to the Events Explorer and fetch events from the Events Explorer. See the [Event Management page](https://docs.datadoghq.com/service_management/events/) for more information.\n\n**Update to Datadog monitor events `aggregation_key` starting March 1, 2025:** The Datadog monitor events `aggregation_key` is unique to each Monitor ID. Starting March 1st, this key will also include Monitor Group, making it unique per *Monitor ID and Monitor Group*. If you're using monitor events `aggregation_key` in dashboard queries or the Event API, you must migrate to use `@monitor.id`. Reach out to [support](https://www.datadoghq.com/support/) if you have any question.", "name": "Events" }, { diff --git a/data/api/v1/translate_actions.json b/data/api/v1/translate_actions.json index f860d3d3bc3f0..3ea2b541dcc25 100644 --- a/data/api/v1/translate_actions.json +++ b/data/api/v1/translate_actions.json @@ -596,8 +596,8 @@ "request_schema_description": "Object with all metric related metadata." }, "ListMonitors": { - "description": "Get details about the specified monitor from your organization.", - "summary": "Get all monitor details" + "description": "Get all monitors from your organization.", + "summary": "Get all monitors" }, "CreateMonitor": { "description": "Create a monitor using the specified options.\n\n#### Monitor Types\n\nThe type of monitor chosen from:\n\n- anomaly: `query alert`\n- APM: `query alert` or `trace-analytics alert`\n- composite: `composite`\n- custom: `service check`\n- forecast: `query alert`\n- host: `service check`\n- integration: `query alert` or `service check`\n- live process: `process alert`\n- logs: `log alert`\n- metric: `query alert`\n- network: `service check`\n- outlier: `query alert`\n- process: `service check`\n- rum: `rum alert`\n- SLO: `slo alert`\n- watchdog: `event-v2 alert`\n- event-v2: `event-v2 alert`\n- audit: `audit alert`\n- error-tracking: `error-tracking alert`\n- database-monitoring: `database-monitoring alert`\n- network-performance: `network-performance alert`\n- cloud cost: `cost alert`\n\n**Notes**:\n- Synthetic monitors are created through the Synthetics API. 
See the [Synthetics API](https://docs.datadoghq.com/api/latest/synthetics/) documentation for more information.\n- Log monitors require an unscoped App Key.\n\n#### Query Types\n\n##### Metric Alert Query\n\nExample: `time_aggr(time_window):space_aggr:metric{tags} [by {key}] operator #`\n\n- `time_aggr`: avg, sum, max, min, change, or pct_change\n- `time_window`: `last_#m` (with `#` between 1 and 10080 depending on the monitor type) or `last_#h`(with `#` between 1 and 168 depending on the monitor type) or `last_1d`, or `last_1w`\n- `space_aggr`: avg, sum, min, or max\n- `tags`: one or more tags (comma-separated), or *\n- `key`: a 'key' in key:value tag syntax; defines a separate alert for each tag in the group (multi-alert)\n- `operator`: <, <=, >, >=, ==, or !=\n- `#`: an integer or decimal number used to set the threshold\n\nIf you are using the `_change_` or `_pct_change_` time aggregator, instead use `change_aggr(time_aggr(time_window),\ntimeshift):space_aggr:metric{tags} [by {key}] operator #` with:\n\n- `change_aggr` change, pct_change\n- `time_aggr` avg, sum, max, min [Learn more](https://docs.datadoghq.com/monitors/create/types/#define-the-conditions)\n- `time_window` last\\_#m (between 1 and 2880 depending on the monitor type), last\\_#h (between 1 and 48 depending on the monitor type), or last_#d (1 or 2)\n- `timeshift` #m_ago (5, 10, 15, or 30), #h_ago (1, 2, or 4), or 1d_ago\n\nUse this to create an outlier monitor using the following query:\n`avg(last_30m):outliers(avg:system.cpu.user{role:es-events-data} by {host}, 'dbscan', 7) > 0`\n\n##### Service Check Query\n\nExample: `\"check\".over(tags).last(count).by(group).count_by_status()`\n\n- `check` name of the check, for example `datadog.agent.up`\n- `tags` one or more quoted tags (comma-separated), or \"*\". for example: `.over(\"env:prod\", \"role:db\")`; `over` cannot be blank.\n- `count` must be at greater than or equal to your max threshold (defined in the `options`). It is limited to 100.\nFor example, if you've specified to notify on 1 critical, 3 ok, and 2 warn statuses, `count` should be at least 3.\n- `group` must be specified for check monitors. Per-check grouping is already explicitly known for some service checks.\nFor example, Postgres integration monitors are tagged by `db`, `host`, and `port`, and Network monitors by `host`, `instance`, and `url`. See [Service Checks](https://docs.datadoghq.com/api/latest/service-checks/) documentation for more information.\n\n##### Event Alert Query\n\n**Note:** The Event Alert Query has been replaced by the Event V2 Alert Query. 
For more information, see the [Event Migration guide](https://docs.datadoghq.com/service_management/events/guides/migrating_to_new_events_features/).\n\n##### Event V2 Alert Query\n\nExample: `events(query).rollup(rollup_method[, measure]).last(time_window) operator #`\n\n- `query` The search query - following the [Log search syntax](https://docs.datadoghq.com/logs/search_syntax/).\n- `rollup_method` The stats roll-up method - supports `count`, `avg` and `cardinality`.\n- `measure` For `avg` and cardinality `rollup_method` - specify the measure or the facet name you want to use.\n- `time_window` #m (between 1 and 2880), #h (between 1 and 48).\n- `operator` `<`, `<=`, `>`, `>=`, `==`, or `!=`.\n- `#` an integer or decimal number used to set the threshold.\n\n##### Process Alert Query\n\nExample: `processes(search).over(tags).rollup('count').last(timeframe) operator #`\n\n- `search` free text search string for querying processes.\nMatching processes match results on the [Live Processes](https://docs.datadoghq.com/infrastructure/process/?tab=linuxwindows) page.\n- `tags` one or more tags (comma-separated)\n- `timeframe` the timeframe to roll up the counts. Examples: 10m, 4h. Supported timeframes: s, m, h and d\n- `operator` <, <=, >, >=, ==, or !=\n- `#` an integer or decimal number used to set the threshold\n\n##### Logs Alert Query\n\nExample: `logs(query).index(index_name).rollup(rollup_method[, measure]).last(time_window) operator #`\n\n- `query` The search query - following the [Log search syntax](https://docs.datadoghq.com/logs/search_syntax/).\n- `index_name` For multi-index organizations, the log index in which the request is performed.\n- `rollup_method` The stats roll-up method - supports `count`, `avg` and `cardinality`.\n- `measure` For `avg` and cardinality `rollup_method` - specify the measure or the facet name you want to use.\n- `time_window` #m (between 1 and 2880), #h (between 1 and 48).\n- `operator` `<`, `<=`, `>`, `>=`, `==`, or `!=`.\n- `#` an integer or decimal number used to set the threshold.\n\n##### Composite Query\n\nExample: `12345 && 67890`, where `12345` and `67890` are the IDs of non-composite monitors\n\n* `name` [*required*, *default* = **dynamic, based on query**]: The name of the alert.\n* `message` [*required*, *default* = **dynamic, based on query**]: A message to include with notifications for this monitor.\nEmail notifications can be sent to specific users by using the same '@username' notation as events.\n* `tags` [*optional*, *default* = **empty list**]: A list of tags to associate with your monitor.\nWhen getting all monitor details via the API, use the `monitor_tags` argument to filter results by these tags.\nIt is only available via the API and isn't visible or editable in the Datadog UI.\n\n##### SLO Alert Query\n\nExample: `error_budget(\"slo_id\").over(\"time_window\") operator #`\n\n- `slo_id`: The alphanumeric SLO ID of the SLO you are configuring the alert for.\n- `time_window`: The time window of the SLO target you wish to alert on. 
Valid options: `7d`, `30d`, `90d`.\n- `operator`: `>=` or `>`\n\n##### Audit Alert Query\n\nExample: `audits(query).rollup(rollup_method[, measure]).last(time_window) operator #`\n\n- `query` The search query - following the [Log search syntax](https://docs.datadoghq.com/logs/search_syntax/).\n- `rollup_method` The stats roll-up method - supports `count`, `avg` and `cardinality`.\n- `measure` For `avg` and cardinality `rollup_method` - specify the measure or the facet name you want to use.\n- `time_window` #m (between 1 and 2880), #h (between 1 and 48).\n- `operator` `<`, `<=`, `>`, `>=`, `==`, or `!=`.\n- `#` an integer or decimal number used to set the threshold.\n\n##### CI Pipelines Alert Query\n\nExample: `ci-pipelines(query).rollup(rollup_method[, measure]).last(time_window) operator #`\n\n- `query` The search query - following the [Log search syntax](https://docs.datadoghq.com/logs/search_syntax/).\n- `rollup_method` The stats roll-up method - supports `count`, `avg`, and `cardinality`.\n- `measure` For `avg` and cardinality `rollup_method` - specify the measure or the facet name you want to use.\n- `time_window` #m (between 1 and 2880), #h (between 1 and 48).\n- `operator` `<`, `<=`, `>`, `>=`, `==`, or `!=`.\n- `#` an integer or decimal number used to set the threshold.\n\n##### CI Tests Alert Query\n\nExample: `ci-tests(query).rollup(rollup_method[, measure]).last(time_window) operator #`\n\n- `query` The search query - following the [Log search syntax](https://docs.datadoghq.com/logs/search_syntax/).\n- `rollup_method` The stats roll-up method - supports `count`, `avg`, and `cardinality`.\n- `measure` For `avg` and cardinality `rollup_method` - specify the measure or the facet name you want to use.\n- `time_window` #m (between 1 and 2880), #h (between 1 and 48).\n- `operator` `<`, `<=`, `>`, `>=`, `==`, or `!=`.\n- `#` an integer or decimal number used to set the threshold.\n\n##### Error Tracking Alert Query\n\n\"New issue\" example: `error-tracking(query).source(issue_source).new().rollup(rollup_method[, measure]).by(group_by).last(time_window) operator #`\n\"High impact issue\" example: `error-tracking(query).source(issue_source).impact().rollup(rollup_method[, measure]).by(group_by).last(time_window) operator #`\n\n- `query` The search query - following the [Log search syntax](https://docs.datadoghq.com/logs/search_syntax/).\n- `issue_source` The issue source - supports `all`, `browser`, `mobile` and `backend` and defaults to `all` if omitted.\n- `rollup_method` The stats roll-up method - supports `count`, `avg`, and `cardinality` and defaults to `count` if omitted.\n- `measure` For `avg` and cardinality `rollup_method` - specify the measure or the facet name you want to use.\n- `group by` Comma-separated list of attributes to group by - should contain at least `issue.id`.\n- `time_window` #m (between 1 and 2880), #h (between 1 and 48).\n- `operator` `<`, `<=`, `>`, `>=`, `==`, or `!=`.\n- `#` an integer or decimal number used to set the threshold.\n\n**Database Monitoring Alert Query**\n\nExample: `database-monitoring(query).rollup(rollup_method[, measure]).last(time_window) operator #`\n\n- `query` The search query - following the [Log search syntax](https://docs.datadoghq.com/logs/search_syntax/).\n- `rollup_method` The stats roll-up method - supports `count`, `avg`, and `cardinality`.\n- `measure` For `avg` and cardinality `rollup_method` - specify the measure or the facet name you want to use.\n- `time_window` #m (between 1 and 2880), #h (between 1 and 48).\n- 
`operator` `<`, `<=`, `>`, `>=`, `==`, or `!=`.\n- `#` an integer or decimal number used to set the threshold.\n\n**Network Performance Alert Query**\n\nExample: `network-performance(query).rollup(rollup_method[, measure]).last(time_window) operator #`\n\n- `query` The search query - following the [Log search syntax](https://docs.datadoghq.com/logs/search_syntax/).\n- `rollup_method` The stats roll-up method - supports `count`, `avg`, and `cardinality`.\n- `measure` For `avg` and cardinality `rollup_method` - specify the measure or the facet name you want to use.\n- `time_window` #m (between 1 and 2880), #h (between 1 and 48).\n- `operator` `<`, `<=`, `>`, `>=`, `==`, or `!=`.\n- `#` an integer or decimal number used to set the threshold.", diff --git a/data/api/v1/translate_tags.json b/data/api/v1/translate_tags.json index 3d8a5a9f29a5e..806e37f162823 100644 --- a/data/api/v1/translate_tags.json +++ b/data/api/v1/translate_tags.json @@ -33,7 +33,7 @@ }, "events": { "name": "Events", - "description": "The Event Management API allows you to programmatically post events to the Events Explorer\nand fetch events from the Events Explorer. See the [Event Management page](https://docs.datadoghq.com/service_management/events/) for more information." + "description": "The Event Management API allows you to programmatically post events to the Events Explorer and fetch events from the Events Explorer. See the [Event Management page](https://docs.datadoghq.com/service_management/events/) for more information.\n\n**Update to Datadog monitor events `aggregation_key` starting March 1, 2025:** The Datadog monitor events `aggregation_key` is unique to each Monitor ID. Starting March 1st, this key will also include Monitor Group, making it unique per *Monitor ID and Monitor Group*. If you're using monitor events `aggregation_key` in dashboard queries or the Event API, you must migrate to use `@monitor.id`. Reach out to [support](https://www.datadoghq.com/support/) if you have any question." }, "gcp-integration": { "name": "GCP Integration", diff --git a/data/api/v2/full_spec.yaml b/data/api/v2/full_spec.yaml index 2ed6348ac98f0..138ff5358eaba 100644 --- a/data/api/v2/full_spec.yaml +++ b/data/api/v2/full_spec.yaml @@ -2669,6 +2669,57 @@ components: $ref: '#/components/schemas/AwsCURConfig' type: array type: object + AwsScanOptionsAttributes: + description: Attributes for the AWS scan options. + properties: + lambda: + description: Indicates if scanning of Lambda functions is enabled. + example: true + type: boolean + sensitive_data: + description: Indicates if scanning for sensitive data is enabled. + example: false + type: boolean + vuln_containers_os: + description: Indicates if scanning for vulnerabilities in containers is + enabled. + example: true + type: boolean + vuln_host_os: + description: Indicates if scanning for vulnerabilities in hosts is enabled. + example: true + type: boolean + type: object + AwsScanOptionsData: + description: Single AWS Scan Options entry. + properties: + attributes: + $ref: '#/components/schemas/AwsScanOptionsAttributes' + id: + description: The ID of the AWS account. + example: '184366314700' + type: string + type: + $ref: '#/components/schemas/AwsScanOptionsType' + type: object + AwsScanOptionsResponse: + description: Response object that includes a list of AWS scan options. + properties: + data: + description: A list of AWS scan options. 
+ items: + $ref: '#/components/schemas/AwsScanOptionsData' + type: array + type: object + AwsScanOptionsType: + default: aws_scan_options + description: The type of the resource. The value should always be `aws_scan_options`. + enum: + - aws_scan_options + example: aws_scan_options + type: string + x-enum-varnames: + - AWS_SCAN_OPTIONS AzureUCConfig: description: Azure config. properties: @@ -29673,6 +29724,27 @@ info: version: '1.0' openapi: 3.0.0 paths: + /api/v2/agentless_scanning/accounts/aws: + get: + description: Fetches the scan options configured for AWS accounts. + operationId: ListAwsScanOptions + responses: + '200': + content: + application/json: + schema: + $ref: '#/components/schemas/AwsScanOptionsResponse' + description: OK + '403': + $ref: '#/components/responses/NotAuthorizedResponse' + '429': + $ref: '#/components/responses/TooManyRequestsResponse' + summary: Get AWS Scan Options + tags: + - Agentless Scanning + x-menu-order: 3 + x-undo: + type: safe /api/v2/api_keys: get: description: List all API keys available for your account. @@ -39484,7 +39556,8 @@ paths: /api/v2/metrics/{metric_name}/active-configurations: get: description: List tags and aggregations that are actively queried on dashboards, - notebooks, monitors, and the Metrics Explorer for a given metric name. + notebooks, monitors, the Metrics Explorer, and using the API for a given metric + name. operationId: ListActiveMetricConfigurations parameters: - $ref: '#/components/parameters/MetricName' @@ -50241,6 +50314,11 @@ tags: externalDocs: url: https://docs.datadoghq.com/integrations/amazon_web_services/#log-collection name: AWS Logs Integration +- description: "Datadog Agentless Scanning provides visibility into risks and vulnerabilities\nwithin + your hosts, running containers, and serverless functions\u2014all without\nrequiring + teams to install Agents on every host or where Agents cannot be installed.\nGo + to https://www.datadoghq.com/blog/agentless-scanning/ to learn more" + name: Agentless Scanning - description: Search your Audit Logs events over HTTP. name: Audit - description: '[The AuthN Mappings API](https://docs.datadoghq.com/account_management/authn_mapping/?tab=example) @@ -50337,10 +50415,17 @@ tags: end times, prevent all alerting related to specified Datadog tags.' name: Downtimes - description: 'The Event Management API allows you to programmatically post events - to the Events Explorer + to the Events Explorer and fetch events from the Events Explorer. See the [Event + Management page](https://docs.datadoghq.com/service_management/events/) for more + information. - and fetch events from the Events Explorer. See the [Event Management page](https://docs.datadoghq.com/service_management/events/) - for more information.' + + **Update to Datadog monitor events `aggregation_key` starting March 1, 2025:** + The Datadog monitor events `aggregation_key` is unique to each Monitor ID. Starting + March 1st, this key will also include Monitor Group, making it unique per *Monitor + ID and Monitor Group*. If you''re using monitor events `aggregation_key` in dashboard + queries or the Event API, you must migrate to use `@monitor.id`. Reach out to + [support](https://www.datadoghq.com/support/) if you have any question.' name: Events - description: Manage your Datadog Fastly integration accounts and services directly through the Datadog API. 
See the [Fastly integration page](https://docs.datadoghq.com/integrations/fastly/) diff --git a/data/api/v2/full_spec_deref.json b/data/api/v2/full_spec_deref.json index 04343f9127cd5..8f7c9afa3cfea 100644 --- a/data/api/v2/full_spec_deref.json +++ b/data/api/v2/full_spec_deref.json @@ -13469,6 +13469,152 @@ }, "type": "object" }, + "AwsScanOptionsAttributes": { + "description": "Attributes for the AWS scan options.", + "properties": { + "lambda": { + "description": "Indicates if scanning of Lambda functions is enabled.", + "example": true, + "type": "boolean" + }, + "sensitive_data": { + "description": "Indicates if scanning for sensitive data is enabled.", + "example": false, + "type": "boolean" + }, + "vuln_containers_os": { + "description": "Indicates if scanning for vulnerabilities in containers is enabled.", + "example": true, + "type": "boolean" + }, + "vuln_host_os": { + "description": "Indicates if scanning for vulnerabilities in hosts is enabled.", + "example": true, + "type": "boolean" + } + }, + "type": "object" + }, + "AwsScanOptionsData": { + "description": "Single AWS Scan Options entry.", + "properties": { + "attributes": { + "description": "Attributes for the AWS scan options.", + "properties": { + "lambda": { + "description": "Indicates if scanning of Lambda functions is enabled.", + "example": true, + "type": "boolean" + }, + "sensitive_data": { + "description": "Indicates if scanning for sensitive data is enabled.", + "example": false, + "type": "boolean" + }, + "vuln_containers_os": { + "description": "Indicates if scanning for vulnerabilities in containers is enabled.", + "example": true, + "type": "boolean" + }, + "vuln_host_os": { + "description": "Indicates if scanning for vulnerabilities in hosts is enabled.", + "example": true, + "type": "boolean" + } + }, + "type": "object" + }, + "id": { + "description": "The ID of the AWS account.", + "example": "184366314700", + "type": "string" + }, + "type": { + "default": "aws_scan_options", + "description": "The type of the resource. The value should always be `aws_scan_options`.", + "enum": [ + "aws_scan_options" + ], + "example": "aws_scan_options", + "type": "string", + "x-enum-varnames": [ + "AWS_SCAN_OPTIONS" + ] + } + }, + "type": "object" + }, + "AwsScanOptionsResponse": { + "description": "Response object that includes a list of AWS scan options.", + "properties": { + "data": { + "description": "A list of AWS scan options.", + "items": { + "description": "Single AWS Scan Options entry.", + "properties": { + "attributes": { + "description": "Attributes for the AWS scan options.", + "properties": { + "lambda": { + "description": "Indicates if scanning of Lambda functions is enabled.", + "example": true, + "type": "boolean" + }, + "sensitive_data": { + "description": "Indicates if scanning for sensitive data is enabled.", + "example": false, + "type": "boolean" + }, + "vuln_containers_os": { + "description": "Indicates if scanning for vulnerabilities in containers is enabled.", + "example": true, + "type": "boolean" + }, + "vuln_host_os": { + "description": "Indicates if scanning for vulnerabilities in hosts is enabled.", + "example": true, + "type": "boolean" + } + }, + "type": "object" + }, + "id": { + "description": "The ID of the AWS account.", + "example": "184366314700", + "type": "string" + }, + "type": { + "default": "aws_scan_options", + "description": "The type of the resource. 
The value should always be `aws_scan_options`.", + "enum": [ + "aws_scan_options" + ], + "example": "aws_scan_options", + "type": "string", + "x-enum-varnames": [ + "AWS_SCAN_OPTIONS" + ] + } + }, + "type": "object" + }, + "type": "array" + } + }, + "type": "object" + }, + "AwsScanOptionsType": { + "default": "aws_scan_options", + "description": "The type of the resource. The value should always be `aws_scan_options`.", + "enum": [ + "aws_scan_options" + ], + "example": "aws_scan_options", + "type": "string", + "x-enum-varnames": [ + "AWS_SCAN_OPTIONS" + ] + }, "AzureUCConfig": { "description": "Azure config.", "properties": { @@ -192567,6 +192713,144 @@ }, "openapi": "3.0.0", "paths": { + "/api/v2/agentless_scanning/accounts/aws": { + "get": { + "description": "Fetches the scan options configured for AWS accounts.", + "operationId": "ListAwsScanOptions", + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "description": "Response object that includes a list of AWS scan options.", + "properties": { + "data": { + "description": "A list of AWS scan options.", + "items": { + "description": "Single AWS Scan Options entry.", + "properties": { + "attributes": { + "description": "Attributes for the AWS scan options.", + "properties": { + "lambda": { + "description": "Indicates if scanning of Lambda functions is enabled.", + "example": true, + "type": "boolean" + }, + "sensitive_data": { + "description": "Indicates if scanning for sensitive data is enabled.", + "example": false, + "type": "boolean" + }, + "vuln_containers_os": { + "description": "Indicates if scanning for vulnerabilities in containers is enabled.", + "example": true, + "type": "boolean" + }, + "vuln_host_os": { + "description": "Indicates if scanning for vulnerabilities in hosts is enabled.", + "example": true, + "type": "boolean" + } + }, + "type": "object" + }, + "id": { + "description": "The ID of the AWS account.", + "example": "184366314700", + "type": "string" + }, + "type": { + "default": "aws_scan_options", + "description": "The type of the resource. 
The value should always be `aws_scan_options`.", + "enum": [ + "aws_scan_options" + ], + "example": "aws_scan_options", + "type": "string", + "x-enum-varnames": [ + "AWS_SCAN_OPTIONS" + ] + } + }, + "type": "object" + }, + "type": "array" + } + }, + "type": "object" + } + } + }, + "description": "OK" + }, + "403": { + "content": { + "application/json": { + "schema": { + "description": "API error response.", + "properties": { + "errors": { + "description": "A list of errors.", + "example": [ + "Bad Request" + ], + "items": { + "description": "A list of items.", + "example": "Bad Request", + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "errors" + ], + "type": "object" + } + } + }, + "description": "Not Authorized" + }, + "429": { + "content": { + "application/json": { + "schema": { + "description": "API error response.", + "properties": { + "errors": { + "description": "A list of errors.", + "example": [ + "Bad Request" + ], + "items": { + "description": "A list of items.", + "example": "Bad Request", + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "errors" + ], + "type": "object" + } + } + }, + "description": "Too many requests" + } + }, + "summary": "Get AWS Scan Options", + "tags": [ + "Agentless Scanning" + ], + "x-menu-order": 3, + "x-undo": { + "type": "safe" + } + } + }, "/api/v2/api_keys": { "get": { "description": "List all API keys available for your account.", @@ -282889,7 +283173,7 @@ }, "/api/v2/metrics/{metric_name}/active-configurations": { "get": { - "description": "List tags and aggregations that are actively queried on dashboards, notebooks, monitors, and the Metrics Explorer for a given metric name.", + "description": "List tags and aggregations that are actively queried on dashboards, notebooks, monitors, the Metrics Explorer, and using the API for a given metric name.", "operationId": "ListActiveMetricConfigurations", "parameters": [ { @@ -373019,6 +373303,10 @@ }, "name": "AWS Logs Integration" }, + { + "description": "Datadog Agentless Scanning provides visibility into risks and vulnerabilities\nwithin your hosts, running containers, and serverless functions—all without\nrequiring teams to install Agents on every host or where Agents cannot be installed.\nGo to https://www.datadoghq.com/blog/agentless-scanning/ to learn more", + "name": "Agentless Scanning" + }, { "description": "Search your Audit Logs events over HTTP.", "name": "Audit" @@ -373088,7 +373376,7 @@ "name": "Downtimes" }, { - "description": "The Event Management API allows you to programmatically post events to the Events Explorer\nand fetch events from the Events Explorer. See the [Event Management page](https://docs.datadoghq.com/service_management/events/) for more information.", + "description": "The Event Management API allows you to programmatically post events to the Events Explorer and fetch events from the Events Explorer. See the [Event Management page](https://docs.datadoghq.com/service_management/events/) for more information.\n\n**Update to Datadog monitor events `aggregation_key` starting March 1, 2025:** The Datadog monitor events `aggregation_key` is unique to each Monitor ID. Starting March 1st, this key will also include Monitor Group, making it unique per *Monitor ID and Monitor Group*. If you're using monitor events `aggregation_key` in dashboard queries or the Event API, you must migrate to use `@monitor.id`. 
Reach out to [support](https://www.datadoghq.com/support/) if you have any question.", "name": "Events" }, { diff --git a/data/api/v2/translate_actions.json b/data/api/v2/translate_actions.json index 39b42bcdeaf05..23aedaae9e366 100644 --- a/data/api/v2/translate_actions.json +++ b/data/api/v2/translate_actions.json @@ -1,4 +1,8 @@ { + "ListAwsScanOptions": { + "description": "Fetches the scan options configured for AWS accounts.", + "summary": "Get AWS Scan Options" + }, "ListAPIKeys": { "description": "List all API keys available for your account.", "summary": "Get all API keys" @@ -1042,7 +1046,7 @@ "request_schema_description": "Wrapper object for a single bulk tag configuration request." }, "ListActiveMetricConfigurations": { - "description": "List tags and aggregations that are actively queried on dashboards, notebooks, monitors, and the Metrics Explorer for a given metric name.", + "description": "List tags and aggregations that are actively queried on dashboards, notebooks, monitors, the Metrics Explorer, and using the API for a given metric name.", "summary": "List active tags and aggregations" }, "ListTagsByMetricName": { diff --git a/data/api/v2/translate_tags.json b/data/api/v2/translate_tags.json index b139d32c645e7..ca1d981f2384f 100644 --- a/data/api/v2/translate_tags.json +++ b/data/api/v2/translate_tags.json @@ -15,6 +15,10 @@ "name": "AWS Logs Integration", "description": "Configure your Datadog-AWS-Logs integration directly through Datadog API.\nFor more information, see the [AWS integration page](https://docs.datadoghq.com/integrations/amazon_web_services/#log-collection)." }, + "agentless-scanning": { + "name": "Agentless Scanning", + "description": "Datadog Agentless Scanning provides visibility into risks and vulnerabilities\nwithin your hosts, running containers, and serverless functions—all without\nrequiring teams to install Agents on every host or where Agents cannot be installed.\nGo to https://www.datadoghq.com/blog/agentless-scanning/ to learn more" + }, "audit": { "name": "Audit", "description": "Search your Audit Logs events over HTTP." @@ -85,7 +89,7 @@ }, "events": { "name": "Events", - "description": "The Event Management API allows you to programmatically post events to the Events Explorer\nand fetch events from the Events Explorer. See the [Event Management page](https://docs.datadoghq.com/service_management/events/) for more information." + "description": "The Event Management API allows you to programmatically post events to the Events Explorer and fetch events from the Events Explorer. See the [Event Management page](https://docs.datadoghq.com/service_management/events/) for more information.\n\n**Update to Datadog monitor events `aggregation_key` starting March 1, 2025:** The Datadog monitor events `aggregation_key` is unique to each Monitor ID. Starting March 1st, this key will also include Monitor Group, making it unique per *Monitor ID and Monitor Group*. If you're using monitor events `aggregation_key` in dashboard queries or the Event API, you must migrate to use `@monitor.id`. Reach out to [support](https://www.datadoghq.com/support/) if you have any question." 
}, "fastly-integration": { "name": "Fastly Integration", diff --git a/data/libraries.yaml b/data/libraries.yaml index cb59ba3fda578..345634192d474 100644 --- a/data/libraries.yaml +++ b/data/libraries.yaml @@ -271,11 +271,16 @@ Tracing: link: https://github.com/zachdaniel/spandex authors: Zach Daniel - Go: - - name: dd-trace-go + - name: dd-trace-go (v1) link: https://github.com/DataDog/dd-trace-go official: true authors: Datadog notes: Go package is 'gopkg.in/DataDog/dd-trace-go.v1'. + - name: dd-trace-go (v2) + link: https://github.com/DataDog/dd-trace-go + official: true + authors: Datadog + notes: "Go package is 'github.com/DataDog/dd-trace-go/v2'. Note: v2 is in Preview." - name: datadog-go link: https://github.com/savaki/datadog authors: Matt Ho diff --git a/data/partials/home.yaml b/data/partials/home.yaml index e9f658e93130d..984255a0af374 100644 --- a/data/partials/home.yaml +++ b/data/partials/home.yaml @@ -57,9 +57,13 @@ nav_sections: icon: events desc: Track notable changes and alerts in your applications and infrastructure - title: Workflow Automation - link: workflows + link: service_management/workflows/ icon: workflows desc: Automate and orchestrate processes across your tech stack + - title: App Builder + link: service_management/app_builder/ + icon: app-builder + desc: Create low-code applications to streamline your internal tools - nav_section: - name: 'Products' - navtiles: diff --git a/go.mod b/go.mod index 4a760cdb1df56..39322e6a297ad 100644 --- a/go.mod +++ b/go.mod @@ -3,8 +3,8 @@ module documentation go 1.14 require ( - github.com/DataDog/websites-modules v1.4.202 // indirect - github.com/DataDog/websites-sources v0.0.0-20250107113720-055628b87cb7 // indirect + github.com/DataDog/websites-modules v1.4.203 // indirect + github.com/DataDog/websites-sources v0.0.0-20241125134453-6f4fe0220a22 // indirect ) // replace github.com/DataDog/websites-modules => /Users/matt.fitzsimmons/source/websites-modules diff --git a/go.sum b/go.sum index c132ebdbb9444..559b1d406b2d3 100644 --- a/go.sum +++ b/go.sum @@ -1,4 +1,4 @@ -github.com/DataDog/websites-modules v1.4.202 h1:utVIiSFtR0IrA0cEnxupCo4sYISRMWxpPAFs/16t5FY= -github.com/DataDog/websites-modules v1.4.202/go.mod h1:CcQxAmCXoiFr3hNw6Q+1si65C3uOP1gB+7aX4S3h+CQ= -github.com/DataDog/websites-sources v0.0.0-20250107113720-055628b87cb7 h1:3MeRlEkGXsPT/Xq/fE5lMR/+M4thO1TlTPrOQQtFp5w= -github.com/DataDog/websites-sources v0.0.0-20250107113720-055628b87cb7/go.mod h1:RvGhXV0uQC6Ocs+n84QyL97kows6vg6VG5ZLQMHw4Fs= +github.com/DataDog/websites-modules v1.4.203 h1:oBqjUl5DlMDesbBxTt/wURwyfgQ0IF6vIw+wOXlAPnc= +github.com/DataDog/websites-modules v1.4.203/go.mod h1:CcQxAmCXoiFr3hNw6Q+1si65C3uOP1gB+7aX4S3h+CQ= +github.com/DataDog/websites-sources v0.0.0-20241125134453-6f4fe0220a22 h1:4mxdsEkowAJuPndJUWb14oibV+KYKMu37hMvX0DD3rk= +github.com/DataDog/websites-sources v0.0.0-20241125134453-6f4fe0220a22/go.mod h1:RvGhXV0uQC6Ocs+n84QyL97kows6vg6VG5ZLQMHw4Fs= diff --git a/layouts/partials/apm/apm-opentracing-custom.html b/layouts/partials/apm/apm-opentracing-custom.html index cff17f86be098..b928283efaf49 100644 --- a/layouts/partials/apm/apm-opentracing-custom.html +++ b/layouts/partials/apm/apm-opentracing-custom.html @@ -33,13 +33,6 @@
-
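
To make the `ListAwsScanOptions` operation added above concrete, here is a minimal sketch of calling the new endpoint (`GET /api/v2/agentless_scanning/accounts/aws`) directly with `net/http` rather than a generated client. It assumes the standard `DD-API-KEY`/`DD-APPLICATION-KEY` authentication headers and the `api.datadoghq.com` site (adjust the host for other Datadog sites); the struct names are illustrative and simply mirror the `AwsScanOptionsResponse` schema introduced in this spec update.

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"os"
)

// Structs mirror the AwsScanOptionsResponse schema added in this spec update.
type awsScanOptionsAttributes struct {
	Lambda           bool `json:"lambda"`             // Lambda function scanning enabled
	SensitiveData    bool `json:"sensitive_data"`     // sensitive data scanning enabled
	VulnContainersOS bool `json:"vuln_containers_os"` // container vulnerability scanning enabled
	VulnHostOS       bool `json:"vuln_host_os"`       // host vulnerability scanning enabled
}

type awsScanOptionsData struct {
	ID         string                   `json:"id"`   // AWS account ID
	Type       string                   `json:"type"` // always "aws_scan_options"
	Attributes awsScanOptionsAttributes `json:"attributes"`
}

type awsScanOptionsResponse struct {
	Data []awsScanOptionsData `json:"data"`
}

func main() {
	req, err := http.NewRequest(http.MethodGet,
		"https://api.datadoghq.com/api/v2/agentless_scanning/accounts/aws", nil)
	if err != nil {
		log.Fatal(err)
	}
	// Standard Datadog API authentication headers; keys are read from the
	// environment here (hypothetical variable names).
	req.Header.Set("DD-API-KEY", os.Getenv("DD_API_KEY"))
	req.Header.Set("DD-APPLICATION-KEY", os.Getenv("DD_APP_KEY"))

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		log.Fatalf("unexpected status: %s", resp.Status)
	}

	var out awsScanOptionsResponse
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		log.Fatal(err)
	}
	for _, d := range out.Data {
		fmt.Printf("account %s: lambda=%t sensitive_data=%t containers=%t hosts=%t\n",
			d.ID, d.Attributes.Lambda, d.Attributes.SensitiveData,
			d.Attributes.VulnContainersOS, d.Attributes.VulnHostOS)
	}
}
```

Once the official API clients are regenerated from this spec, the same operation should be available through them; the raw HTTP call above is only a stand-in for that.
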
diff --git a/layouts/shortcodes/csm-setup-aws.en.md b/layouts/shortcodes/csm-setup-aws.en.md index 9d9ad4fa55fae..8889f2044a27f 100644 --- a/layouts/shortcodes/csm-setup-aws.en.md +++ b/layouts/shortcodes/csm-setup-aws.en.md @@ -1,13 +1,9 @@ ### Set up the Datadog AWS integration -If you haven't already, set up the [Amazon Web Services integration][1]. You must also add the [required permissions][2] for resource collection. +If you haven't already, set up the [Amazon Web Services integration][1]. You must also [enable resource collection][2] by attaching the AWS-managed SecurityAudit Policy to the Datadog IAM role in your AWS account. ### Enable CSM for your AWS accounts -Use one of the following methods to enable CSM for your AWS accounts: - -#### CSM Setup page - 1. On the [**Cloud Security Management Setup**][3] page, click **Cloud Integrations**. 1. Expand the **AWS** section. 1. To enable resource scanning for an account, click the **Plus** button, then switch the **Enable Resource Scanning** toggle to the on position. @@ -15,14 +11,8 @@ Use one of the following methods to enable CSM for your AWS accounts: 1. To create a filter that excludes certain resources from being evaluated by CSM, click the **Plus** (+) icon under **Resource Evaluation Filters (Optional)**. For more information, see [Use Filters to Exclude Resources from Evaluation][5]. 1. Click **Done**. -#### Amazon Web Services integration page - -1. On the [**Amazon Web Services Integration**][4] page, select an AWS account. -1. On the **Resource Collection** tab, select **Enable Cloud Security Management**. -1. Click **Save**. - [1]: https://docs.datadoghq.com/integrations/amazon_web_services/ -[2]: /integrations/amazon_web_services/?tab=roledelegation#cloud-security-management-misconfigurations +[2]: /integrations/amazon_web_services/?tab=roledelegation#cloud-security-management [3]: https://app.datadoghq.com/security/configuration/csm/setup [4]: https://app.datadoghq.com/integrations/amazon-web-services [5]: /security/cloud_security_management/guide/resource_evaluation_filters \ No newline at end of file diff --git a/layouts/shortcodes/csm-setup-azure.en.md b/layouts/shortcodes/csm-setup-azure.en.md index e4df30d8b9d3e..d94460f6bec3d 100644 --- a/layouts/shortcodes/csm-setup-azure.en.md +++ b/layouts/shortcodes/csm-setup-azure.en.md @@ -6,22 +6,12 @@ If you haven't already, set up the [Microsoft Azure integration][1]. ### Enable CSM for your Azure subscriptions -Use one of the following methods to enable CSM for your Azure subscriptions: - -#### CSM Setup page - 1. On the [**Cloud Security Management Setup**][2] page, click **Cloud Integrations**. 2. Expand the **Azure** section. 3. To enable resource scanning for a subscription, switch the **Resource Scanning** toggle to the on position. 4. To create a filter that excludes certain resources from being evaluated by CSM, click the **Plus** (+) icon under **Resource Evaluation Filters (Optional)**. For more information, see [Use Filters to Exclude Resources from Evaluation][4]. 5. Click **Done**. -#### Azure integration page - -1. On the [**Azure Integration**][3] page, select an Azure app registration. -2. Under **Resource Collection**, select **Enable Cloud Security Management**. -3. Click **Submit Changes**. 
- [1]: https://docs.datadoghq.com/integrations/azure [2]: https://app.datadoghq.com/security/configuration/csm/setup [3]: https://app.datadoghq.com/integrations/azure diff --git a/layouts/shortcodes/csm-setup-google-cloud.en.md b/layouts/shortcodes/csm-setup-google-cloud.en.md index f0835acf8ddb4..93bbcabbc45fe 100644 --- a/layouts/shortcodes/csm-setup-google-cloud.en.md +++ b/layouts/shortcodes/csm-setup-google-cloud.en.md @@ -15,22 +15,12 @@ The Datadog Google Cloud Platform integration uses service accounts to create an ### Enable CSM for your Google Cloud projects -Use one of the following methods to enable CSM for your Google Cloud projects: - -#### CSM Setup page - 1. On the [**Cloud Security Management Setup**][2] page, click **Cloud Integrations**. 2. Expand the **GCP** section. 3. To enable resource scanning for a project, switch the **Resource Scanning** toggle to the on position. 4. To create a filter that excludes certain resources from being evaluated by CSM, click the **Plus** (+) icon under **Resource Evaluation Filters (Optional)**. For more information, see [Use Filters to Exclude Resources from Evaluation][11]. 5. Click **Done**. -#### Google Cloud Platform integration page - -1. On the [**Google Cloud Platform Integration**][10] page, select a Google Cloud project. -2. Under **Resource Collection**, select **Enable Cloud Security Management**. -3. Click **Save**. - [1]: https://docs.datadoghq.com/integrations/google_cloud_platform [2]: https://app.datadoghq.com/security/configuration/csm/setup [4]: https://support.google.com/cloud/answer/6293499?hl=en diff --git a/layouts/shortcodes/dbm-sqlserver-agent-setup-kubernetes.en.md b/layouts/shortcodes/dbm-sqlserver-agent-setup-kubernetes.en.md index 8225eea2bc5f9..f492e0d7877dc 100644 --- a/layouts/shortcodes/dbm-sqlserver-agent-setup-kubernetes.en.md +++ b/layouts/shortcodes/dbm-sqlserver-agent-setup-kubernetes.en.md @@ -84,10 +84,10 @@ metadata: "driver": "FreeTDS", "include_ao_metrics": true, # Optional: For AlwaysOn users "agent_jobs": { # Optional: For monitoring SQL Server Agent jobs - "enabled": true - "collection_interval": 15 + "enabled": true, + "collection_interval": 15, "history_row_limit": 10000 - } + }, "tags": ["service:", "env:"] # Optional } ] diff --git a/layouts/shortcodes/latest-lambda-layer-version.html b/layouts/shortcodes/latest-lambda-layer-version.html index b7dd77a95c681..7c4c67de04270 100644 --- a/layouts/shortcodes/latest-lambda-layer-version.html +++ b/layouts/shortcodes/latest-lambda-layer-version.html @@ -16,12 +16,12 @@ {{- if eq (.Get "layer") "ruby" -}} - 23 + 24 {{- end -}} {{- if eq (.Get "layer") "extension" -}} - 66 + 68 {{- end -}} diff --git a/static/images/account_management/audit_logs/dashboard_change_diff.png b/static/images/account_management/audit_logs/dashboard_change_diff.png new file mode 100644 index 0000000000000..5353b10a52374 Binary files /dev/null and b/static/images/account_management/audit_logs/dashboard_change_diff.png differ diff --git a/static/images/account_management/audit_logs/monitor_change_diff.png b/static/images/account_management/audit_logs/monitor_change_diff.png new file mode 100644 index 0000000000000..3ea48d48efe9d Binary files /dev/null and b/static/images/account_management/audit_logs/monitor_change_diff.png differ diff --git a/static/images/dashboards/scheduled_reports/add_recipients.png b/static/images/dashboards/scheduled_reports/add_recipients.png new file mode 100644 index 0000000000000..2f2fdeb7179e1 Binary files /dev/null and 
b/static/images/dashboards/scheduled_reports/add_recipients.png differ diff --git a/static/images/dashboards/scheduled_reports/configure_report.png b/static/images/dashboards/scheduled_reports/configure_report.png new file mode 100644 index 0000000000000..953d630d3bf70 Binary files /dev/null and b/static/images/dashboards/scheduled_reports/configure_report.png differ diff --git a/static/images/dashboards/scheduled_reports/edit_variables.png b/static/images/dashboards/scheduled_reports/edit_variables.png new file mode 100644 index 0000000000000..d5636b8d6c60b Binary files /dev/null and b/static/images/dashboards/scheduled_reports/edit_variables.png differ diff --git a/static/images/dashboards/scheduled_reports/manage_reports.png b/static/images/dashboards/scheduled_reports/manage_reports.png new file mode 100644 index 0000000000000..c0b1a691c0b20 Binary files /dev/null and b/static/images/dashboards/scheduled_reports/manage_reports.png differ diff --git a/static/images/dashboards/scheduled_reports/set_schedule.png b/static/images/dashboards/scheduled_reports/set_schedule.png new file mode 100644 index 0000000000000..b207cb5dd0561 Binary files /dev/null and b/static/images/dashboards/scheduled_reports/set_schedule.png differ diff --git a/static/images/infrastructure/resource_catalog/governance/custom-policy-example-1.png b/static/images/infrastructure/resource_catalog/governance/custom-policy-example-1.png new file mode 100644 index 0000000000000..93e586f67eb62 Binary files /dev/null and b/static/images/infrastructure/resource_catalog/governance/custom-policy-example-1.png differ diff --git a/static/images/infrastructure/resource_catalog/governance/custom-policy-list-1.png b/static/images/infrastructure/resource_catalog/governance/custom-policy-list-1.png new file mode 100644 index 0000000000000..651b5e062a486 Binary files /dev/null and b/static/images/infrastructure/resource_catalog/governance/custom-policy-list-1.png differ diff --git a/static/images/integrations/microsoft_teams/microsoft_teams_add_gov_app_to_team.png b/static/images/integrations/microsoft_teams/microsoft_teams_add_gov_app_to_team.png new file mode 100644 index 0000000000000..c3117d90a731a Binary files /dev/null and b/static/images/integrations/microsoft_teams/microsoft_teams_add_gov_app_to_team.png differ diff --git a/static/images/integrations/microsoft_teams/microsoft_teams_workflows_private_channels_step_1.png b/static/images/integrations/microsoft_teams/microsoft_teams_workflows_private_channels_step_1.png new file mode 100644 index 0000000000000..18828c8a14e32 Binary files /dev/null and b/static/images/integrations/microsoft_teams/microsoft_teams_workflows_private_channels_step_1.png differ diff --git a/static/images/integrations/microsoft_teams/microsoft_teams_workflows_private_channels_step_2.png b/static/images/integrations/microsoft_teams/microsoft_teams_workflows_private_channels_step_2.png new file mode 100644 index 0000000000000..80f891fcf5dcb Binary files /dev/null and b/static/images/integrations/microsoft_teams/microsoft_teams_workflows_private_channels_step_2.png differ diff --git a/static/images/integrations/microsoft_teams/microsoft_teams_workflows_step_1.png b/static/images/integrations/microsoft_teams/microsoft_teams_workflows_step_1.png new file mode 100644 index 0000000000000..3dbf3296a3eb1 Binary files /dev/null and b/static/images/integrations/microsoft_teams/microsoft_teams_workflows_step_1.png differ diff --git a/static/images/integrations/microsoft_teams/microsoft_teams_workflows_step_10.png 
b/static/images/integrations/microsoft_teams/microsoft_teams_workflows_step_10.png new file mode 100644 index 0000000000000..004a6d1e6699a Binary files /dev/null and b/static/images/integrations/microsoft_teams/microsoft_teams_workflows_step_10.png differ diff --git a/static/images/integrations/microsoft_teams/microsoft_teams_workflows_step_11.png b/static/images/integrations/microsoft_teams/microsoft_teams_workflows_step_11.png new file mode 100644 index 0000000000000..be5fde292f2bc Binary files /dev/null and b/static/images/integrations/microsoft_teams/microsoft_teams_workflows_step_11.png differ diff --git a/static/images/integrations/microsoft_teams/microsoft_teams_workflows_step_12.png b/static/images/integrations/microsoft_teams/microsoft_teams_workflows_step_12.png new file mode 100644 index 0000000000000..fb495ff9477d6 Binary files /dev/null and b/static/images/integrations/microsoft_teams/microsoft_teams_workflows_step_12.png differ diff --git a/static/images/integrations/microsoft_teams/microsoft_teams_workflows_step_13.png b/static/images/integrations/microsoft_teams/microsoft_teams_workflows_step_13.png new file mode 100644 index 0000000000000..ac5c6d38cb98c Binary files /dev/null and b/static/images/integrations/microsoft_teams/microsoft_teams_workflows_step_13.png differ diff --git a/static/images/integrations/microsoft_teams/microsoft_teams_workflows_step_14.png b/static/images/integrations/microsoft_teams/microsoft_teams_workflows_step_14.png new file mode 100644 index 0000000000000..3c0563107dc08 Binary files /dev/null and b/static/images/integrations/microsoft_teams/microsoft_teams_workflows_step_14.png differ diff --git a/static/images/integrations/microsoft_teams/microsoft_teams_workflows_step_3.png b/static/images/integrations/microsoft_teams/microsoft_teams_workflows_step_3.png new file mode 100644 index 0000000000000..3894f7f4f7550 Binary files /dev/null and b/static/images/integrations/microsoft_teams/microsoft_teams_workflows_step_3.png differ diff --git a/static/images/integrations/microsoft_teams/microsoft_teams_workflows_step_4.png b/static/images/integrations/microsoft_teams/microsoft_teams_workflows_step_4.png new file mode 100644 index 0000000000000..bb6915771cb70 Binary files /dev/null and b/static/images/integrations/microsoft_teams/microsoft_teams_workflows_step_4.png differ diff --git a/static/images/integrations/microsoft_teams/microsoft_teams_workflows_step_5.png b/static/images/integrations/microsoft_teams/microsoft_teams_workflows_step_5.png new file mode 100644 index 0000000000000..dd8078fff5c8d Binary files /dev/null and b/static/images/integrations/microsoft_teams/microsoft_teams_workflows_step_5.png differ diff --git a/static/images/integrations/microsoft_teams/microsoft_teams_workflows_step_6.png b/static/images/integrations/microsoft_teams/microsoft_teams_workflows_step_6.png new file mode 100644 index 0000000000000..385113f6ac966 Binary files /dev/null and b/static/images/integrations/microsoft_teams/microsoft_teams_workflows_step_6.png differ diff --git a/static/images/integrations/microsoft_teams/microsoft_teams_workflows_step_7.png b/static/images/integrations/microsoft_teams/microsoft_teams_workflows_step_7.png new file mode 100644 index 0000000000000..4da720c679cdc Binary files /dev/null and b/static/images/integrations/microsoft_teams/microsoft_teams_workflows_step_7.png differ diff --git a/static/images/integrations/microsoft_teams/microsoft_teams_workflows_step_8.png 
b/static/images/integrations/microsoft_teams/microsoft_teams_workflows_step_8.png new file mode 100644 index 0000000000000..11400f8960fb4 Binary files /dev/null and b/static/images/integrations/microsoft_teams/microsoft_teams_workflows_step_8.png differ diff --git a/static/images/integrations/microsoft_teams/microsoft_teams_workflows_step_9.png b/static/images/integrations/microsoft_teams/microsoft_teams_workflows_step_9.png new file mode 100644 index 0000000000000..80f891fcf5dcb Binary files /dev/null and b/static/images/integrations/microsoft_teams/microsoft_teams_workflows_step_9.png differ diff --git a/static/images/integrations/microsoft_teams/microsoft_teams_workflows_template.png b/static/images/integrations/microsoft_teams/microsoft_teams_workflows_template.png new file mode 100644 index 0000000000000..1713d95ca3341 Binary files /dev/null and b/static/images/integrations/microsoft_teams/microsoft_teams_workflows_template.png differ diff --git a/static/images/integrations/microsoft_teams/microsoft_teams_workflows_used_a_template.png b/static/images/integrations/microsoft_teams/microsoft_teams_workflows_used_a_template.png new file mode 100644 index 0000000000000..f9af21002227c Binary files /dev/null and b/static/images/integrations/microsoft_teams/microsoft_teams_workflows_used_a_template.png differ diff --git a/static/images/logs/workspace/export/example_exported_dataset.png b/static/images/logs/workspace/export/example_exported_dataset.png new file mode 100644 index 0000000000000..c7d9efda91faa Binary files /dev/null and b/static/images/logs/workspace/export/example_exported_dataset.png differ diff --git a/static/images/logs/workspace/export/link_to_workspace_from_dashboard.png b/static/images/logs/workspace/export/link_to_workspace_from_dashboard.png new file mode 100644 index 0000000000000..2742cf4867c8a Binary files /dev/null and b/static/images/logs/workspace/export/link_to_workspace_from_dashboard.png differ diff --git a/static/images/serverless/gcr/volume_mount.png b/static/images/serverless/gcr/volume_mount.png new file mode 100644 index 0000000000000..b052f021cf883 Binary files /dev/null and b/static/images/serverless/gcr/volume_mount.png differ diff --git a/static/images/service_management/app_builder/app-list-star.png b/static/images/service_management/app_builder/app-list-star.png new file mode 100644 index 0000000000000..0c86e052d37de Binary files /dev/null and b/static/images/service_management/app_builder/app-list-star.png differ diff --git a/static/images/service_management/app_builder/app-list-with-favorited-app.png b/static/images/service_management/app_builder/app-list-with-favorited-app.png new file mode 100644 index 0000000000000..1e299cbdf2c5c Binary files /dev/null and b/static/images/service_management/app_builder/app-list-with-favorited-app.png differ diff --git a/static/images/tracing/service_catalog/scorecards_workflow_example.png b/static/images/tracing/service_catalog/scorecards_workflow_example.png new file mode 100644 index 0000000000000..e0b2c278f104f Binary files /dev/null and b/static/images/tracing/service_catalog/scorecards_workflow_example.png differ diff --git a/static/images/watchdog/external_provider_outage.png b/static/images/watchdog/external_provider_outage.png new file mode 100644 index 0000000000000..01e87f07d0bb9 Binary files /dev/null and b/static/images/watchdog/external_provider_outage.png differ diff --git a/static/resources/json/full_spec_v1.json b/static/resources/json/full_spec_v1.json index 3f7845b225762..eb65b8b789c63 100644 
--- a/static/resources/json/full_spec_v1.json +++ b/static/resources/json/full_spec_v1.json @@ -144186,7 +144186,7 @@ "type": "object" }, "LogsPipelineList": { - "description": "Array of pipeline ID strings.", + "description": "Array of all log pipeline objects configured for the organization.", "items": { "description": "Pipelines and processors operate on incoming logs,\nparsing and transforming them into structured attributes for easier querying.\n\n**Note**: These endpoints are only available for admin users.\nMake sure to use an application key created by an admin.", "properties": { @@ -553144,7 +553144,7 @@ "type": "integer" }, "profiling_host_top99p": { - "description": "Shows the 99th percentile of all profiled hosts over all hours in the current date for the given org.", + "description": "Shows the 99th percentile of all profiled hosts over all hours within the current date for the given org.", "format": "int64", "type": "integer" }, @@ -553231,6 +553231,21 @@ "format": "int64", "type": "integer" }, + "rum_mobile_replay_session_count_android_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on Android over all hours within the current date for the given org.", + "format": "int64", + "type": "integer" + }, + "rum_mobile_replay_session_count_ios_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on iOS over all hours within the current date for the given org.", + "format": "int64", + "type": "integer" + }, + "rum_mobile_replay_session_count_reactnative_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on React Native over all hours within the current date for the given org.", + "format": "int64", + "type": "integer" + }, "rum_replay_session_count_sum": { "description": "Shows the sum of all RUM Session Replay counts over all hours in the current date for the given org (To be introduced on October 1st, 2024).", "format": "int64", @@ -553369,7 +553384,7 @@ "type": "integer" }, "profiling_host_top99p": { - "description": "Shows the 99th percentile of all profiled hosts over all hours in the current date for all organizations.", + "description": "Shows the 99th percentile of all profiled hosts over all hours within the current date for all organizations.", "format": "int64", "type": "integer" }, @@ -553444,7 +553459,22 @@ "type": "integer" }, "rum_mobile_lite_session_count_roku_sum": { - "description": "Shows the sum of all mobile RUM lite sessions on Roku over all hours in the current date for all organizations (To be introduced on October 1st, 2024).", + "description": "Shows the sum of all mobile RUM lite sessions on Roku over all hours within the current date for all organizations (To be introduced on October 1st, 2024).", + "format": "int64", + "type": "integer" + }, + "rum_mobile_replay_session_count_android_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on Android over all hours within the current date for the given org.", + "format": "int64", + "type": "integer" + }, + "rum_mobile_replay_session_count_ios_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on iOS over all hours within the current date for the given org.", + "format": "int64", + "type": "integer" + }, + "rum_mobile_replay_session_count_reactnative_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on React Native over all hours within the current date for the given org.", "format": "int64", "type": "integer" }, @@ -554186,7 +554216,7 @@ "type": "integer" }, 
"profiling_host_top99p": { - "description": "Shows the 99th percentile of all profiled hosts over all hours in the current date for the given org.", + "description": "Shows the 99th percentile of all profiled hosts over all hours within the current date for the given org.", "format": "int64", "type": "integer" }, @@ -554273,6 +554303,21 @@ "format": "int64", "type": "integer" }, + "rum_mobile_replay_session_count_android_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on Android over all hours within the current date for the given org.", + "format": "int64", + "type": "integer" + }, + "rum_mobile_replay_session_count_ios_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on iOS over all hours within the current date for the given org.", + "format": "int64", + "type": "integer" + }, + "rum_mobile_replay_session_count_reactnative_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on React Native over all hours within the current date for the given org.", + "format": "int64", + "type": "integer" + }, "rum_replay_session_count_sum": { "description": "Shows the sum of all RUM Session Replay counts over all hours in the current date for the given org (To be introduced on October 1st, 2024).", "format": "int64", @@ -555232,7 +555277,22 @@ "type": "integer" }, "rum_mobile_lite_session_count_roku_agg_sum": { - "description": "Shows the sum of all mobile RUM lite sessions on Roku over all hours in the current month for all organizations (To be introduced on October 1st, 2024).", + "description": "Shows the sum of all mobile RUM lite sessions on Roku over all hours within the current month for all organizations (To be introduced on October 1st, 2024).", + "format": "int64", + "type": "integer" + }, + "rum_mobile_replay_session_count_android_agg_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on Android over all hours within the current month for all organizations.", + "format": "int64", + "type": "integer" + }, + "rum_mobile_replay_session_count_ios_agg_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on iOS over all hours within the current month for all organizations.", + "format": "int64", + "type": "integer" + }, + "rum_mobile_replay_session_count_reactnative_agg_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on React Native over all hours within the current month for all organizations.", "format": "int64", "type": "integer" }, @@ -556545,7 +556605,7 @@ "type": "integer" }, "profiling_host_top99p": { - "description": "Shows the 99th percentile of all profiled hosts over all hours in the current date for the given org.", + "description": "Shows the 99th percentile of all profiled hosts over all hours within the current date for the given org.", "format": "int64", "type": "integer" }, @@ -556632,6 +556692,21 @@ "format": "int64", "type": "integer" }, + "rum_mobile_replay_session_count_android_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on Android over all hours within the current date for the given org.", + "format": "int64", + "type": "integer" + }, + "rum_mobile_replay_session_count_ios_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on iOS over all hours within the current date for the given org.", + "format": "int64", + "type": "integer" + }, + "rum_mobile_replay_session_count_reactnative_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on React Native over all hours 
within the current date for the given org.", + "format": "int64", + "type": "integer" + }, "rum_replay_session_count_sum": { "description": "Shows the sum of all RUM Session Replay counts over all hours in the current date for the given org (To be introduced on October 1st, 2024).", "format": "int64", @@ -556770,7 +556845,7 @@ "type": "integer" }, "profiling_host_top99p": { - "description": "Shows the 99th percentile of all profiled hosts over all hours in the current date for all organizations.", + "description": "Shows the 99th percentile of all profiled hosts over all hours within the current date for all organizations.", "format": "int64", "type": "integer" }, @@ -556845,7 +556920,22 @@ "type": "integer" }, "rum_mobile_lite_session_count_roku_sum": { - "description": "Shows the sum of all mobile RUM lite sessions on Roku over all hours in the current date for all organizations (To be introduced on October 1st, 2024).", + "description": "Shows the sum of all mobile RUM lite sessions on Roku over all hours within the current date for all organizations (To be introduced on October 1st, 2024).", + "format": "int64", + "type": "integer" + }, + "rum_mobile_replay_session_count_android_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on Android over all hours within the current date for the given org.", + "format": "int64", + "type": "integer" + }, + "rum_mobile_replay_session_count_ios_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on iOS over all hours within the current date for the given org.", + "format": "int64", + "type": "integer" + }, + "rum_mobile_replay_session_count_reactnative_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on React Native over all hours within the current date for the given org.", "format": "int64", "type": "integer" }, @@ -964988,7 +965078,7 @@ "content": { "application/json": { "schema": { - "description": "Array of pipeline ID strings.", + "description": "Array of all log pipeline objects configured for the organization.", "items": { "description": "Pipelines and processors operate on incoming logs,\nparsing and transforming them into structured attributes for easier querying.\n\n**Note**: These endpoints are only available for admin users.\nMake sure to use an application key created by an admin.", "properties": { @@ -971542,7 +971632,7 @@ }, "/api/v1/monitor": { "get": { - "description": "Get details about the specified monitor from your organization.", + "description": "Get all monitors from your organization.", "operationId": "ListMonitors", "parameters": [ { @@ -971633,6 +971723,7 @@ "content": { "application/json": { "schema": { + "description": "An array of monitor objects.", "items": { "description": "Object describing a monitor.", "properties": { @@ -972586,7 +972677,7 @@ ] } ], - "summary": "Get all monitor details", + "summary": "Get all monitors", "tags": [ "Monitors" ], @@ -1132548,7 +1132639,22 @@ "type": "integer" }, "rum_mobile_lite_session_count_roku_agg_sum": { - "description": "Shows the sum of all mobile RUM lite sessions on Roku over all hours in the current month for all organizations (To be introduced on October 1st, 2024).", + "description": "Shows the sum of all mobile RUM lite sessions on Roku over all hours within the current month for all organizations (To be introduced on October 1st, 2024).", + "format": "int64", + "type": "integer" + }, + "rum_mobile_replay_session_count_android_agg_sum": { + "description": "Shows the sum of all mobile RUM replay 
sessions on Android over all hours within the current month for all organizations.", + "format": "int64", + "type": "integer" + }, + "rum_mobile_replay_session_count_ios_agg_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on iOS over all hours within the current month for all organizations.", + "format": "int64", + "type": "integer" + }, + "rum_mobile_replay_session_count_reactnative_agg_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on React Native over all hours within the current month for all organizations.", "format": "int64", "type": "integer" }, @@ -1133861,7 +1133967,7 @@ "type": "integer" }, "profiling_host_top99p": { - "description": "Shows the 99th percentile of all profiled hosts over all hours in the current date for the given org.", + "description": "Shows the 99th percentile of all profiled hosts over all hours within the current date for the given org.", "format": "int64", "type": "integer" }, @@ -1133948,6 +1134054,21 @@ "format": "int64", "type": "integer" }, + "rum_mobile_replay_session_count_android_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on Android over all hours within the current date for the given org.", + "format": "int64", + "type": "integer" + }, + "rum_mobile_replay_session_count_ios_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on iOS over all hours within the current date for the given org.", + "format": "int64", + "type": "integer" + }, + "rum_mobile_replay_session_count_reactnative_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on React Native over all hours within the current date for the given org.", + "format": "int64", + "type": "integer" + }, "rum_replay_session_count_sum": { "description": "Shows the sum of all RUM Session Replay counts over all hours in the current date for the given org (To be introduced on October 1st, 2024).", "format": "int64", @@ -1134086,7 +1134207,7 @@ "type": "integer" }, "profiling_host_top99p": { - "description": "Shows the 99th percentile of all profiled hosts over all hours in the current date for all organizations.", + "description": "Shows the 99th percentile of all profiled hosts over all hours within the current date for all organizations.", "format": "int64", "type": "integer" }, @@ -1134161,7 +1134282,22 @@ "type": "integer" }, "rum_mobile_lite_session_count_roku_sum": { - "description": "Shows the sum of all mobile RUM lite sessions on Roku over all hours in the current date for all organizations (To be introduced on October 1st, 2024).", + "description": "Shows the sum of all mobile RUM lite sessions on Roku over all hours within the current date for all organizations (To be introduced on October 1st, 2024).", + "format": "int64", + "type": "integer" + }, + "rum_mobile_replay_session_count_android_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on Android over all hours within the current date for the given org.", + "format": "int64", + "type": "integer" + }, + "rum_mobile_replay_session_count_ios_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on iOS over all hours within the current date for the given org.", + "format": "int64", + "type": "integer" + }, + "rum_mobile_replay_session_count_reactnative_sum": { + "description": "Shows the sum of all mobile RUM replay sessions on React Native over all hours within the current date for the given org.", "format": "int64", "type": "integer" }, @@ -1138024,7 +1138160,7 @@ "name": 
"Embeddable Graphs" }, { - "description": "The Event Management API allows you to programmatically post events to the Events Explorer\nand fetch events from the Events Explorer. See the [Event Management page](https://docs.datadoghq.com/service_management/events/) for more information.", + "description": "The Event Management API allows you to programmatically post events to the Events Explorer and fetch events from the Events Explorer. See the [Event Management page](https://docs.datadoghq.com/service_management/events/) for more information.\n\n**Update to Datadog monitor events `aggregation_key` starting March 1, 2025:** The Datadog monitor events `aggregation_key` is unique to each Monitor ID. Starting March 1st, this key will also include Monitor Group, making it unique per *Monitor ID and Monitor Group*. If you're using monitor events `aggregation_key` in dashboard queries or the Event API, you must migrate to use `@monitor.id`. Reach out to [support](https://www.datadoghq.com/support/) if you have any question.", "name": "Events" }, { diff --git a/static/resources/json/full_spec_v2.json b/static/resources/json/full_spec_v2.json index 04343f9127cd5..8f7c9afa3cfea 100644 --- a/static/resources/json/full_spec_v2.json +++ b/static/resources/json/full_spec_v2.json @@ -13469,6 +13469,152 @@ }, "type": "object" }, + "AwsScanOptionsAttributes": { + "description": "Attributes for the AWS scan options.", + "properties": { + "lambda": { + "description": "Indicates if scanning of Lambda functions is enabled.", + "example": true, + "type": "boolean" + }, + "sensitive_data": { + "description": "Indicates if scanning for sensitive data is enabled.", + "example": false, + "type": "boolean" + }, + "vuln_containers_os": { + "description": "Indicates if scanning for vulnerabilities in containers is enabled.", + "example": true, + "type": "boolean" + }, + "vuln_host_os": { + "description": "Indicates if scanning for vulnerabilities in hosts is enabled.", + "example": true, + "type": "boolean" + } + }, + "type": "object" + }, + "AwsScanOptionsData": { + "description": "Single AWS Scan Options entry.", + "properties": { + "attributes": { + "description": "Attributes for the AWS scan options.", + "properties": { + "lambda": { + "description": "Indicates if scanning of Lambda functions is enabled.", + "example": true, + "type": "boolean" + }, + "sensitive_data": { + "description": "Indicates if scanning for sensitive data is enabled.", + "example": false, + "type": "boolean" + }, + "vuln_containers_os": { + "description": "Indicates if scanning for vulnerabilities in containers is enabled.", + "example": true, + "type": "boolean" + }, + "vuln_host_os": { + "description": "Indicates if scanning for vulnerabilities in hosts is enabled.", + "example": true, + "type": "boolean" + } + }, + "type": "object" + }, + "id": { + "description": "The ID of the AWS account.", + "example": "184366314700", + "type": "string" + }, + "type": { + "default": "aws_scan_options", + "description": "The type of the resource. 
The value should always be `aws_scan_options`.", + "enum": [ + "aws_scan_options" + ], + "example": "aws_scan_options", + "type": "string", + "x-enum-varnames": [ + "AWS_SCAN_OPTIONS" + ] + } + }, + "type": "object" + }, + "AwsScanOptionsResponse": { + "description": "Response object that includes a list of AWS scan options.", + "properties": { + "data": { + "description": "A list of AWS scan options.", + "items": { + "description": "Single AWS Scan Options entry.", + "properties": { + "attributes": { + "description": "Attributes for the AWS scan options.", + "properties": { + "lambda": { + "description": "Indicates if scanning of Lambda functions is enabled.", + "example": true, + "type": "boolean" + }, + "sensitive_data": { + "description": "Indicates if scanning for sensitive data is enabled.", + "example": false, + "type": "boolean" + }, + "vuln_containers_os": { + "description": "Indicates if scanning for vulnerabilities in containers is enabled.", + "example": true, + "type": "boolean" + }, + "vuln_host_os": { + "description": "Indicates if scanning for vulnerabilities in hosts is enabled.", + "example": true, + "type": "boolean" + } + }, + "type": "object" + }, + "id": { + "description": "The ID of the AWS account.", + "example": "184366314700", + "type": "string" + }, + "type": { + "default": "aws_scan_options", + "description": "The type of the resource. The value should always be `aws_scan_options`.", + "enum": [ + "aws_scan_options" + ], + "example": "aws_scan_options", + "type": "string", + "x-enum-varnames": [ + "AWS_SCAN_OPTIONS" + ] + } + }, + "type": "object" + }, + "type": "array" + } + }, + "type": "object" + }, + "AwsScanOptionsType": { + "default": "aws_scan_options", + "description": "The type of the resource. The value should always be `aws_scan_options`.", + "enum": [ + "aws_scan_options" + ], + "example": "aws_scan_options", + "type": "string", + "x-enum-varnames": [ + "AWS_SCAN_OPTIONS" + ] + }, "AzureUCConfig": { "description": "Azure config.", "properties": { @@ -192567,6 +192713,144 @@ }, "openapi": "3.0.0", "paths": { + "/api/v2/agentless_scanning/accounts/aws": { + "get": { + "description": "Fetches the scan options configured for AWS accounts.", + "operationId": "ListAwsScanOptions", + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "description": "Response object that includes a list of AWS scan options.", + "properties": { + "data": { + "description": "A list of AWS scan options.", + "items": { + "description": "Single AWS Scan Options entry.", + "properties": { + "attributes": { + "description": "Attributes for the AWS scan options.", + "properties": { + "lambda": { + "description": "Indicates if scanning of Lambda functions is enabled.", + "example": true, + "type": "boolean" + }, + "sensitive_data": { + "description": "Indicates if scanning for sensitive data is enabled.", + "example": false, + "type": "boolean" + }, + "vuln_containers_os": { + "description": "Indicates if scanning for vulnerabilities in containers is enabled.", + "example": true, + "type": "boolean" + }, + "vuln_host_os": { + "description": "Indicates if scanning for vulnerabilities in hosts is enabled.", + "example": true, + "type": "boolean" + } + }, + "type": "object" + }, + "id": { + "description": "The ID of the AWS account.", + "example": "184366314700", + "type": "string" + }, + "type": { + "default": "aws_scan_options", + "description": "The type of the resource. 
The value should always be `aws_scan_options`.", + "enum": [ + "aws_scan_options" + ], + "example": "aws_scan_options", + "type": "string", + "x-enum-varnames": [ + "AWS_SCAN_OPTIONS" + ] + } + }, + "type": "object" + }, + "type": "array" + } + }, + "type": "object" + } + } + }, + "description": "OK" + }, + "403": { + "content": { + "application/json": { + "schema": { + "description": "API error response.", + "properties": { + "errors": { + "description": "A list of errors.", + "example": [ + "Bad Request" + ], + "items": { + "description": "A list of items.", + "example": "Bad Request", + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "errors" + ], + "type": "object" + } + } + }, + "description": "Not Authorized" + }, + "429": { + "content": { + "application/json": { + "schema": { + "description": "API error response.", + "properties": { + "errors": { + "description": "A list of errors.", + "example": [ + "Bad Request" + ], + "items": { + "description": "A list of items.", + "example": "Bad Request", + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "errors" + ], + "type": "object" + } + } + }, + "description": "Too many requests" + } + }, + "summary": "Get AWS Scan Options", + "tags": [ + "Agentless Scanning" + ], + "x-menu-order": 3, + "x-undo": { + "type": "safe" + } + } + }, "/api/v2/api_keys": { "get": { "description": "List all API keys available for your account.", @@ -282889,7 +283173,7 @@ }, "/api/v2/metrics/{metric_name}/active-configurations": { "get": { - "description": "List tags and aggregations that are actively queried on dashboards, notebooks, monitors, and the Metrics Explorer for a given metric name.", + "description": "List tags and aggregations that are actively queried on dashboards, notebooks, monitors, the Metrics Explorer, and using the API for a given metric name.", "operationId": "ListActiveMetricConfigurations", "parameters": [ { @@ -373019,6 +373303,10 @@ }, "name": "AWS Logs Integration" }, + { + "description": "Datadog Agentless Scanning provides visibility into risks and vulnerabilities\nwithin your hosts, running containers, and serverless functions—all without\nrequiring teams to install Agents on every host or where Agents cannot be installed.\nGo to https://www.datadoghq.com/blog/agentless-scanning/ to learn more", + "name": "Agentless Scanning" + }, { "description": "Search your Audit Logs events over HTTP.", "name": "Audit" @@ -373088,7 +373376,7 @@ "name": "Downtimes" }, { - "description": "The Event Management API allows you to programmatically post events to the Events Explorer\nand fetch events from the Events Explorer. See the [Event Management page](https://docs.datadoghq.com/service_management/events/) for more information.", + "description": "The Event Management API allows you to programmatically post events to the Events Explorer and fetch events from the Events Explorer. See the [Event Management page](https://docs.datadoghq.com/service_management/events/) for more information.\n\n**Update to Datadog monitor events `aggregation_key` starting March 1, 2025:** The Datadog monitor events `aggregation_key` is unique to each Monitor ID. Starting March 1st, this key will also include Monitor Group, making it unique per *Monitor ID and Monitor Group*. If you're using monitor events `aggregation_key` in dashboard queries or the Event API, you must migrate to use `@monitor.id`. Reach out to [support](https://www.datadoghq.com/support/) if you have any question.", "name": "Events" }, {