From c9f0999623feba1b8493d44940ebf66ff94168bf Mon Sep 17 00:00:00 2001 From: Florence Morris Date: Thu, 12 Sep 2024 17:09:45 -0400 Subject: [PATCH] Merge main into cloud 2.0 (#18906) includes - DOC-10829 - Copied v24.1/essential-metrics.md to v24.2/essential-metrics.md to get advanced changes. --------- Co-authored-by: Matt Linville --- src/current/_data/releases.yml | 35 +++++ .../_includes/releases/v23.1/v23.1.26.md | 41 ++++++ .../_includes/v24.1/essential-metrics.md | 2 +- .../known-limitations/aost-limitations.md | 1 + .../create-statistics-aost-limitation.md | 1 + src/current/_includes/v24.2/cdc/sink-list.md | 1 + .../_includes/v24.2/essential-metrics.md | 136 +++++++++--------- .../known-limitations/aost-limitations.md | 1 + .../create-statistics-aost-limitation.md | 1 + src/current/releases/index.md | 4 + src/current/v22.1/alter-backup.md | 10 +- src/current/v22.1/alter-changefeed.md | 4 +- src/current/v22.1/show-create-schedule.md | 2 +- src/current/v22.2/alter-backup-schedule.md | 2 +- src/current/v22.2/alter-backup.md | 10 +- src/current/v22.2/alter-changefeed.md | 2 +- src/current/v22.2/show-create-schedule.md | 2 +- src/current/v23.1/alter-backup-schedule.md | 2 +- src/current/v23.1/alter-backup.md | 16 +-- src/current/v23.1/alter-changefeed.md | 2 +- src/current/v23.1/show-create-schedule.md | 2 +- src/current/v23.2/alter-backup-schedule.md | 2 +- src/current/v23.2/alter-backup.md | 2 +- src/current/v23.2/alter-changefeed.md | 2 +- src/current/v23.2/show-create-schedule.md | 2 +- src/current/v24.1/alter-backup-schedule.md | 2 +- src/current/v24.1/alter-backup.md | 2 +- src/current/v24.1/alter-changefeed.md | 2 +- src/current/v24.1/as-of-system-time.md | 20 ++- src/current/v24.1/create-statistics.md | 4 + src/current/v24.1/known-limitations.md | 5 +- src/current/v24.1/show-create-schedule.md | 2 +- src/current/v24.2/alter-backup-schedule.md | 2 +- src/current/v24.2/alter-backup.md | 2 +- src/current/v24.2/alter-changefeed.md | 2 +- 
src/current/v24.2/as-of-system-time.md | 20 ++- src/current/v24.2/changefeed-sinks.md | 56 ++++++++ src/current/v24.2/create-changefeed.md | 17 ++- src/current/v24.2/create-statistics.md | 4 + src/current/v24.2/known-limitations.md | 5 +- src/current/v24.2/show-create-schedule.md | 2 +- 41 files changed, 291 insertions(+), 141 deletions(-) create mode 100644 src/current/_includes/releases/v23.1/v23.1.26.md create mode 100644 src/current/_includes/v24.1/known-limitations/aost-limitations.md create mode 100644 src/current/_includes/v24.1/known-limitations/create-statistics-aost-limitation.md create mode 100644 src/current/_includes/v24.2/known-limitations/aost-limitations.md create mode 100644 src/current/_includes/v24.2/known-limitations/create-statistics-aost-limitation.md diff --git a/src/current/_data/releases.yml b/src/current/_data/releases.yml index caab973ac13..4781f3d73b0 100644 --- a/src/current/_data/releases.yml +++ b/src/current/_data/releases.yml @@ -6644,3 +6644,38 @@ docker_arm_limited_access: false source: true previous_release: v24.2.0 + withdrawn: true + +- release_name: v23.1.26 + major_version: v23.1 + release_date: '2024-09-12' + release_type: Production + go_version: go1.22.5 + sha: 43e6847e94880caa6626471a919d683e418195d1 + has_sql_only: true + has_sha256sum: true + mac: + mac_arm: true + mac_arm_experimental: true + mac_arm_limited_access: true + windows: true + linux: + linux_arm: true + linux_arm_experimental: false + linux_arm_limited_access: false + linux_intel_fips: true + linux_arm_fips: false + docker: + docker_image: cockroachdb/cockroach + docker_arm: true + docker_arm_experimental: false + docker_arm_limited_access: false + source: true + previous_release: v23.1.25 + cloud_only: true + cloud_only_message_short: 'Available only for select CockroachDB Cloud clusters' + cloud_only_message: > + This version is currently available only for select + CockroachDB Cloud clusters. 
To request to upgrade + a CockroachDB self-hosted cluster to this version, + [contact support](https://support.cockroachlabs.com/hc/requests/new). diff --git a/src/current/_includes/releases/v23.1/v23.1.26.md b/src/current/_includes/releases/v23.1/v23.1.26.md new file mode 100644 index 00000000000..b027c8fa013 --- /dev/null +++ b/src/current/_includes/releases/v23.1/v23.1.26.md @@ -0,0 +1,41 @@ +## v23.1.26 + +Release Date: September 12, 2024 + +{% include releases/new-release-downloads-docker-image.md release=include.release %} +

Security updates

+ +- For clusters using [Cluster SSO using JSON Web Tokens (JWTs)]({% link v23.1/sso-sql.md %}), the [cluster setting]({% link v23.1/cluster-settings.md %}) `server.jwt_authentication.issuers` can now take multiple values to support various kinds of providers and their mapped JWKS URI. You can set it to one of the following values, which are parsed by Go: + 1. A string that contains a single valid issuer URI. Example: `https://accounts.google.com` + 1. A string that contains a JSON array of valid issuer URIs. Example: `['example.com/adfs','https://accounts.google.com']` + 1. A string that contains a JSON map of valid issuer URIs to corresponding JWKS URIs and deserialized into a map of issuer URLs to corresponding JWKS URIs. A JSON map overrides the JWKS URI published in the issuer's `well-known/` endpoint. Example: `'{ "issuer_jwks_map": { "https://accounts.google.com": "https://www.googleapis.com/oauth2/v3/certs", "example.com/adfs": "https://example.com/adfs/discovery/keys" } }'` + + When `issuer_jwks_map` is set, the key set is fetched from the JWKS URI directly. Otherwise, when `JWKSAutoFetchEnabled` is set, the JWKS URI is fetched from the issuer's `well-known/` endpoint. [#128669][#128669] + +

Operational changes

+ +- New [structured logging events]({% link v23.1/logging.md %}) in the `OPS` channel report broken connections and related transactions during node shutdown. + - `node_shutdown_connection_timeout`: Logged if there are still open client connections after the timeout defined by `server.shutdown.connections.timeout` expires. + - `node_shutdown_transaction_timeout`: Logged if there are still open transactions on those open client connections after the timeout defined by `server.shutdown.transactions.timeout` expires. [#128709][#128709] + +

Bug fixes

+ +- Fixed a bug introduced in v23.1 in which output of [`EXPLAIN (OPT, REDACT)`]({% link v23.1/explain.md %}) of the following `CREATE` statements was not redacted: + - `EXPLAIN (OPT, REDACT) CREATE TABLE` + - `EXPLAIN (OPT, REDACT) CREATE VIEW` + - `EXPLAIN (OPT, REDACT) CREATE FUNCTION` + + [#128487][#128487] + +- Fixed a bug where incorrect values could be produced for virtual [computed columns]({% link v23.1/computed-columns.md %}) in rare cases when the virtual column expression's type did not match the type of the virtual column. [#129008][#129008] +- Fixed a bug where errors like `ERROR: column 'crdb_internal_idx_expr' does not exist` could be logged when accessing a table with an [expression index]({% link v23.1/expression-indexes.md %}) where the expression evaluates to an `ENUM` type. Example: `CREATE INDEX ON t ((col::an_enum))` [#129091][#129091] +- Fixed a bug introduced in v23.1 where a [user-defined function's]({% link v23.1/user-defined-functions.md %}) return type's parameters could not be named when dropping a user-defined function or procedure. [#115906][#115906] +- Fixed a slow-building memory leak when a cluster uses [GSSAPI Kerberos authentication]({% link v23.1/gssapi_authentication.md %}). 
[#130320][#130320] + +[#115906]: https://github.com/cockroachdb/cockroach/pull/115906 +[#128487]: https://github.com/cockroachdb/cockroach/pull/128487 +[#128669]: https://github.com/cockroachdb/cockroach/pull/128669 +[#128709]: https://github.com/cockroachdb/cockroach/pull/128709 +[#129008]: https://github.com/cockroachdb/cockroach/pull/129008 +[#129091]: https://github.com/cockroachdb/cockroach/pull/129091 +[#130320]: https://github.com/cockroachdb/cockroach/pull/130320 diff --git a/src/current/_includes/v24.1/essential-metrics.md b/src/current/_includes/v24.1/essential-metrics.md index 1ba70efbf2d..9070181b7e8 100644 --- a/src/current/_includes/v24.1/essential-metrics.md +++ b/src/current/_includes/v24.1/essential-metrics.md @@ -149,7 +149,7 @@ The **Usage** column explains why each metric is important to visualize in a cus | jobs.backup.currently_running | {% if include.deployment == 'self-hosted' %}jobs.backup.currently_running |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of backup jobs currently running | See Description. | | jobs.backup.currently_paused | {% if include.deployment == 'self-hosted' %}jobs.backup.currently_paused |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of backup jobs currently considered Paused | Monitor and alert on this metric to safeguard against an inadvertent operational error of leaving a backup job in a paused state for an extended period of time. In functional areas, a paused job can hold resources or have concurrency impact or some other negative consequence. Paused backup may break the [recovery point objective (RPO)]({% link {{ page.version.version }}/backup.md %}#performance). | | schedules.BACKUP.failed | {% if include.deployment == 'self-hosted' %}schedules.backup.failed |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of BACKUP jobs failed | Monitor this metric and investigate backup job failures. 
| -| schedules.BACKUP.last-completed-time | {% if include.deployment == 'self-hosted' %}schedules.backup.last_completed_time |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} The unix timestamp of the most recently completed backup by a schedule specified as maintaining this metric | Monitor this metric to know that backups are meeting the [recovery point objective (RPO)]({% link {{ page.version.version }}/backup.md %}#performance). | +| schedules.BACKUP.last-completed-time | {% if include.deployment == 'self-hosted' %}schedules.backup.last_completed_time |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} The Unix timestamp of the most recently completed backup by a schedule specified as maintaining this metric | Monitor this metric to ensure that backups are meeting the [recovery point objective (RPO)]({% link {{ page.version.version }}/disaster-recovery-overview.md %}#resilience-strategy). Each node exports the time that it last completed a backup on behalf of the schedule. If a node is restarted, it will report `0` until it completes a backup. If all nodes are restarted, `max()` is `0` until a node completes a backup.

To make use of this metric, first, from each node, take the maximum over a rolling window equal to or greater than the backup frequency, and then take the maximum of those values across nodes. For example with a backup frequency of 60 minutes, monitor `time() - max_across_nodes(max_over_time(schedules_BACKUP_last_completed_time, 60min))`. | ## Changefeeds diff --git a/src/current/_includes/v24.1/known-limitations/aost-limitations.md b/src/current/_includes/v24.1/known-limitations/aost-limitations.md new file mode 100644 index 00000000000..811c884d08d --- /dev/null +++ b/src/current/_includes/v24.1/known-limitations/aost-limitations.md @@ -0,0 +1 @@ +CockroachDB does not support placeholders in {% if page.name == "as-of-system-time.md" %}`AS OF SYSTEM TIME`{% else %}[`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}){% endif %}. The time value must be a constant value embedded in the SQL string. [#30955](https://github.com/cockroachdb/cockroach/issues/30955) \ No newline at end of file diff --git a/src/current/_includes/v24.1/known-limitations/create-statistics-aost-limitation.md b/src/current/_includes/v24.1/known-limitations/create-statistics-aost-limitation.md new file mode 100644 index 00000000000..09f86f51c48 --- /dev/null +++ b/src/current/_includes/v24.1/known-limitations/create-statistics-aost-limitation.md @@ -0,0 +1 @@ +The `ANALYZE` alias {% if page.name != "create-statistics.md" %}of [`CREATE STATISTICS`]({% link {{ page.version.version }}/create-statistics.md %}){% endif %} does not support specifying an {% if page.name == "as-of-system-time.md" %}`AS OF SYSTEM TIME`{% else %}[`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}){% endif %} timestamp. `ANALYZE` statements use `AS OF SYSTEM TIME '-0.001ms'` automatically. For more control over the statistics interval, use the `CREATE STATISTICS` syntax instead. 
[#96430](https://github.com/cockroachdb/cockroach/issues/96430) \ No newline at end of file diff --git a/src/current/_includes/v24.2/cdc/sink-list.md b/src/current/_includes/v24.2/cdc/sink-list.md index 7888ba1794a..6468b3d317c 100644 --- a/src/current/_includes/v24.2/cdc/sink-list.md +++ b/src/current/_includes/v24.2/cdc/sink-list.md @@ -1,3 +1,4 @@ +- {% if page.name == "changefeed-sinks.md" %} [Amazon MSK](#amazon-msk) {% else %} [Amazon MSK]({% link {{ page.version.version }}/changefeed-sinks.md %}#amazon-msk) {% endif %} - {% if page.name == "changefeed-sinks.md" %} [Apache Pulsar](#apache-pulsar) (in Preview) {% else %} [Apache Pulsar]({% link {{ page.version.version }}/changefeed-sinks.md %}#apache-pulsar) (in Preview) {% endif %} - {% if page.name == "changefeed-sinks.md" %} [Azure Event Hubs](#azure-event-hubs) {% else %} [Azure Event Hubs]({% link {{ page.version.version }}/changefeed-sinks.md %}#azure-event-hubs) {% endif %} - {% if page.name == "changefeed-sinks.md" %} [Cloud Storage](#cloud-storage-sink) / HTTP {% else %} [Cloud Storage]({% link {{ page.version.version }}/changefeed-sinks.md %}#cloud-storage-sink) / HTTP {% endif %} diff --git a/src/current/_includes/v24.2/essential-metrics.md b/src/current/_includes/v24.2/essential-metrics.md index 0da173a35d1..9070181b7e8 100644 --- a/src/current/_includes/v24.2/essential-metrics.md +++ b/src/current/_includes/v24.2/essential-metrics.md @@ -2,53 +2,53 @@ These essential CockroachDB metrics enable you to build custom dashboards with t {% if include.deployment == 'self-hosted' %} * [Grafana]({% link {{ page.version.version }}/monitor-cockroachdb-with-prometheus.md %}#step-5-visualize-metrics-in-grafana) * [Datadog Integration]({% link {{ page.version.version }}/datadog.md %}) - The [**Datadog Integration Metric Name**](https://docs.datadoghq.com/integrations/cockroachdb/?tab=host#metrics) column lists the corresponding Datadog metric which requires the `cockroachdb.` prefix. 
-{% elsif include.deployment == 'dedicated' %} -* [Datadog integration]({% link cockroachcloud/tools-page.md %}#monitor-cockroachdb-dedicated-with-datadog) - The [**Datadog Integration Metric Name**](https://docs.datadoghq.com/integrations/cockroachdb_dedicated/#metrics) column lists the corresponding Datadog metric which requires the `crdb_dedicated.` prefix. -* [Metrics export]({% link cockroachcloud/export-metrics.md %}) +{% elsif include.deployment == 'advanced' %} +* [Datadog integration]({% link cockroachcloud/tools-page.md %}#monitor-cockroachdb-cloud-with-datadog) - The [**Datadog Integration Metric Name**](https://docs.datadoghq.com/integrations/cockroachdb_dedicated/#metrics) column lists the corresponding Datadog metric which requires the `crdb_cloud.` prefix. +* [Metrics export]({% link cockroachcloud/export-metrics-advanced.md %}) {% endif %} The **Usage** column explains why each metric is important to visualize in a custom dashboard and how to make both practical and actionable use of the metric in a production deployment. ## Platform -|
CockroachDB Metric Name
| {% if include.deployment == 'self-hosted' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb/?tab=host#metrics)
(add `cockroachdb.` prefix)
|{% elsif include.deployment == 'dedicated' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb_dedicated/#metrics)
(add `crdb_dedicated.` prefix)
|{% endif %}
Description
| Usage | -| ----------------------------------------------------- | {% if include.deployment == 'self-hosted' %}------ |{% elsif include.deployment == 'dedicated' %}---- |{% endif %} ------------------------------------------------------------ | ------------------------------------------------------------ | +|
CockroachDB Metric Name
| {% if include.deployment == 'self-hosted' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb/?tab=host#metrics)
(add `cockroachdb.` prefix)
|{% elsif include.deployment == 'advanced' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb_dedicated/#metrics)
(add `crdb_cloud.` prefix)
|{% endif %}
Description
| Usage | +| ----------------------------------------------------- | {% if include.deployment == 'self-hosted' %}------ |{% elsif include.deployment == 'advanced' %}---- |{% endif %} ------------------------------------------------------------ | ------------------------------------------------------------ | | sys.cpu.combined.percent-normalized | sys.cpu.combined.percent.normalized | Current user+system CPU percentage consumed by the CRDB process, normalized by number of cores | This metric gives the CPU utilization percentage by the CockroachDB process. If it is equal to 1 (or 100%), then the CPU is overloaded. The CockroachDB process should not be running with over 80% utilization for extended periods of time (hours). This metric is used in the DB Console [**CPU Percent** graph]({% link {{ page.version.version }}/ui-hardware-dashboard.md %}#cpu-percent). | | sys.cpu.host.combined.percent-normalized | NOT AVAILABLE | Current user+system CPU percentage consumed by all processes on the host OS, normalized by number of cores. If the CRDB process is run in a containerized environment, the host OS is the container since the CRDB process cannot inspect CPU usage beyond the container. | This metric gives the CPU utilization percentage of the underlying server, virtual server or container hosting the CockroachDB process. It includes CockroachDB process and non-CockroachDB process usage. If it is equal to 1 (or 100%), then the CPU is overloaded. The CockroachDB process should not be running in an environment with an overloaded state for extended periods of time (hours). This metric is used in the DB Console **Host CPU Percent** graph. | | sys.cpu.user.percent | sys.cpu.user.percent | Current user CPU percentage consumed by the CRDB process | This metric gives the CPU usage percentage at the user level by the CockroachDB process only. This is similar to the Linux `top` command output. The metric value can be more than 1 (or 100%) on multi-core systems. 
It is best to combine user and system metrics. | | sys.cpu.sys.percent | sys.cpu.sys.percent | Current system CPU percentage consumed by the CRDB process | This metric gives the CPU usage percentage at the system (Linux kernel) level by the CockroachDB process only. This is similar to the Linux `top` command output. The metric value can be more than 1 (or 100%) on multi-core systems. It is best to combine user and system metrics. | | sys.rss | sys.rss | Current process memory (RSS) | This metric gives the amount of RAM used by the CockroachDB process. Persistently low values over an extended period of time suggest there is underutilized memory that can be put to work with adjusted [settings for `--cache` or `--max_sql_memory`]({% link {{ page.version.version }}/recommended-production-settings.md %}#cache-and-sql-memory-size) or both. Conversely, a high utilization, even if a temporary spike, indicates an increased risk of [Out-of-memory (OOM) crash]({% link {{ page.version.version }}/cluster-setup-troubleshooting.md %}#out-of-memory-oom-crash) (particularly since the [swap is generally disabled]({% link {{ page.version.version }}/recommended-production-settings.md %}#memory)). | -| sql.mem.root.current | {% if include.deployment == 'self-hosted' %}sql.mem.root.current |{% elsif include.deployment == 'dedicated' %}NOT AVAILABLE |{% endif %} Current sql statement memory usage for root | This metric shows how memory set aside for temporary materializations, such as hash tables and intermediary result sets, is utilized. Use this metric to optimize memory allocations based on long term observations. The maximum amount is set with [`--max_sql_memory`]({% link {{ page.version.version }}/recommended-production-settings.md %}#cache-and-sql-memory-size). 
If the utilization of sql memory is persistently low, perhaps some portion of this memory allocation can be shifted to [`--cache`]({% link {{ page.version.version }}/recommended-production-settings.md %}#cache-and-sql-memory-size). | -| sys.host.disk.write.bytes | {% if include.deployment == 'self-hosted' %}sys.host.disk.write.bytes |{% elsif include.deployment == 'dedicated' %}NOT AVAILABLE |{% endif %} Bytes written to all disks since this process started | This metric reports the effective storage device write throughput (MB/s) rate. To confirm that storage is sufficiently provisioned, assess the I/O performance rates (IOPS and MBPS) in the context of the sys.host.disk.iopsinprogress metric. | -| sys.host.disk.write.count | {% if include.deployment == 'self-hosted' %}sys.host.disk.write |{% elsif include.deployment == 'dedicated' %}NOT AVAILABLE |{% endif %} Disk write operations across all disks since this process started | This metric reports the effective storage device write IOPS rate. To confirm that storage is sufficiently provisioned, assess the I/O performance rates (IOPS and MBPS) in the context of the sys.host.disk.iopsinprogress metric. | -| sys.host.disk.read.bytes | {% if include.deployment == 'self-hosted' %}sys.host.disk.read.bytes |{% elsif include.deployment == 'dedicated' %}NOT AVAILABLE |{% endif %} Bytes read from all disks since this process started | This metric reports the effective storage device read throughput (MB/s) rate. To confirm that storage is sufficiently provisioned, assess the I/O performance rates (IOPS and MBPS) in the context of the sys.host.disk.iopsinprogress metric. | -| sys.host.disk.read.count | {% if include.deployment == 'self-hosted' %}sys.host.disk.read |{% elsif include.deployment == 'dedicated' %}NOT AVAILABLE |{% endif %} Disk read operations across all disks since this process started | This metric reports the effective storage device read IOPS rate. 
To confirm that storage is sufficiently provisioned, assess the I/O performance rates (IOPS and MBPS) in the context of the sys.host.disk.iopsinprogress metric. | -| sys.host.disk.iopsinprogress | {% if include.deployment == 'self-hosted' %}sys.host.disk.iopsinprogress |{% elsif include.deployment == 'dedicated' %}NOT AVAILABLE |{% endif %} IO operations currently in progress on this host | This metric gives the average queue length of the storage device. It characterizes the storage device's performance capability. All I/O performance metrics are Linux counters and correspond to the `avgqu-sz` in the Linux `iostat` command output. You need to view the device queue graph in the context of the actual read/write IOPS and MBPS metrics that show the actual device utilization. If the device is not keeping up, the queue will grow. Values over 10 are bad. Values around 5 mean the device is working hard trying to keep up. For internal (on chassis) [NVMe](https://www.wikipedia.org/wiki/NVM_Express) devices, the queue values are typically 0. For network connected devices, such as [AWS EBS volumes](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html), the normal operating range of values is 1 to 2. Spikes in values are OK. They indicate an I/O spike where the device fell behind and then caught up. End users may experience inconsistent response times, but there should be no cluster stability issues. If the queue is greater than 5 for an extended period of time and IOPS or MBPS are low, then the storage is most likely not provisioned per Cockroach Labs guidance. In AWS EBS, it is commonly an EBS type, such as gp2, not suitable as database primary storage. If I/O is low and the queue is low, the most likely scenario is that the CPU is lacking and not driving I/O. One such case is a cluster with nodes with only 2 vcpus which is not supported [sizing]({% link {{ page.version.version }}/recommended-production-settings.md %}#sizing) for production deployments. 
There are quite a few background processes in the database that take CPU away from the workload, so the workload is just not getting the CPU. Review [storage and disk I/O]({% link {{ page.version.version }}/common-issues-to-monitor.md %}#storage-and-disk-i-o). | +| sql.mem.root.current | {% if include.deployment == 'self-hosted' %}sql.mem.root.current |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Current sql statement memory usage for root | This metric shows how memory set aside for temporary materializations, such as hash tables and intermediary result sets, is utilized. Use this metric to optimize memory allocations based on long term observations. The maximum amount is set with [`--max_sql_memory`]({% link {{ page.version.version }}/recommended-production-settings.md %}#cache-and-sql-memory-size). If the utilization of sql memory is persistently low, perhaps some portion of this memory allocation can be shifted to [`--cache`]({% link {{ page.version.version }}/recommended-production-settings.md %}#cache-and-sql-memory-size). | +| sys.host.disk.write.bytes | {% if include.deployment == 'self-hosted' %}sys.host.disk.write.bytes |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Bytes written to all disks since this process started | This metric reports the effective storage device write throughput (MB/s) rate. To confirm that storage is sufficiently provisioned, assess the I/O performance rates (IOPS and MBPS) in the context of the sys.host.disk.iopsinprogress metric. | +| sys.host.disk.write.count | {% if include.deployment == 'self-hosted' %}sys.host.disk.write |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Disk write operations across all disks since this process started | This metric reports the effective storage device write IOPS rate. 
To confirm that storage is sufficiently provisioned, assess the I/O performance rates (IOPS and MBPS) in the context of the sys.host.disk.iopsinprogress metric. | +| sys.host.disk.read.bytes | {% if include.deployment == 'self-hosted' %}sys.host.disk.read.bytes |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Bytes read from all disks since this process started | This metric reports the effective storage device read throughput (MB/s) rate. To confirm that storage is sufficiently provisioned, assess the I/O performance rates (IOPS and MBPS) in the context of the sys.host.disk.iopsinprogress metric. | +| sys.host.disk.read.count | {% if include.deployment == 'self-hosted' %}sys.host.disk.read |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Disk read operations across all disks since this process started | This metric reports the effective storage device read IOPS rate. To confirm that storage is sufficiently provisioned, assess the I/O performance rates (IOPS and MBPS) in the context of the sys.host.disk.iopsinprogress metric. | +| sys.host.disk.iopsinprogress | {% if include.deployment == 'self-hosted' %}sys.host.disk.iopsinprogress |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} IO operations currently in progress on this host | This metric gives the average queue length of the storage device. It characterizes the storage device's performance capability. All I/O performance metrics are Linux counters and correspond to the `avgqu-sz` in the Linux `iostat` command output. You need to view the device queue graph in the context of the actual read/write IOPS and MBPS metrics that show the actual device utilization. If the device is not keeping up, the queue will grow. Values over 10 are bad. Values around 5 mean the device is working hard trying to keep up. For internal (on chassis) [NVMe](https://www.wikipedia.org/wiki/NVM_Express) devices, the queue values are typically 0. 
For network connected devices, such as [AWS EBS volumes](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html), the normal operating range of values is 1 to 2. Spikes in values are OK. They indicate an I/O spike where the device fell behind and then caught up. End users may experience inconsistent response times, but there should be no cluster stability issues. If the queue is greater than 5 for an extended period of time and IOPS or MBPS are low, then the storage is most likely not provisioned per Cockroach Labs guidance. In AWS EBS, it is commonly an EBS type, such as gp2, not suitable as database primary storage. If I/O is low and the queue is low, the most likely scenario is that the CPU is lacking and not driving I/O. One such case is a cluster with nodes with only 2 vcpus which is not supported [sizing]({% link {{ page.version.version }}/recommended-production-settings.md %}#sizing) for production deployments. There are quite a few background processes in the database that take CPU away from the workload, so the workload is just not getting the CPU. Review [storage and disk I/O]({% link {{ page.version.version }}/common-issues-to-monitor.md %}#storage-and-disk-i-o). | | sys.host.net.recv.bytes | sys.host.net.recv.bytes | Bytes received on all network interfaces since this process started | This metric gives the node's ingress/egress network transfer rates for flat sections which may indicate insufficiently provisioned networking or high error rates. CockroachDB is using a reliable TCP/IP protocol, so errors result in delivery retries that create a "slow network" effect. | | sys.host.net.send.bytes | sys.host.net.send.bytes | Bytes sent on all network interfaces since this process started | This metric gives the node's ingress/egress network transfer rates for flat sections which may indicate insufficiently provisioned networking or high error rates. 
CockroachDB is using a reliable TCP/IP protocol, so errors result in delivery retries that create a "slow network" effect. | | clock-offset.meannanos | clock.offset.meannanos | Mean clock offset with other nodes | This metric gives the node's clock skew. In a well-configured environment, the actual clock skew would be in the sub-millisecond range. A skew exceeding 5 ms is likely due to a NTP service mis-configuration. Reducing the actual clock skew reduces the probability of uncertainty related conflicts and corresponding retires which has a positive impact on workload performance. Conversely, a larger actual clock skew increases the probability of retries due to uncertainty conflicts, with potentially measurable adverse effects on workload performance. | ## Storage -
CockroachDB Metric Name
| {% if include.deployment == 'self-hosted' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb/?tab=host#metrics)
(add `cockroachdb.` prefix)
|{% elsif include.deployment == 'dedicated' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb_dedicated/#metrics)
(add `crdb_dedicated.` prefix)
|{% endif %}
Description
| Usage | -| ----------------------------------------------------- | {% if include.deployment == 'self-hosted' %}------ |{% elsif include.deployment == 'dedicated' %}---- |{% endif %} ------------------------------------------------------------ | ------------------------------------------------------------ | -| capacity | {% if include.deployment == 'self-hosted' %}capacity.total |{% elsif include.deployment == 'dedicated' %}capacity |{% endif %} Total storage capacity | This metric gives total storage capacity. Measurements should comply with the following rule: CockroachDB storage volumes should not be utilized more than 60% (40% free space). | +
CockroachDB Metric Name
| {% if include.deployment == 'self-hosted' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb/?tab=host#metrics)
(add `cockroachdb.` prefix)
|{% elsif include.deployment == 'advanced' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb_dedicated/#metrics)
(add `crdb_cloud.` prefix)
|{% endif %}
Description
| Usage | +| ----------------------------------------------------- | {% if include.deployment == 'self-hosted' %}------ |{% elsif include.deployment == 'advanced' %}---- |{% endif %} ------------------------------------------------------------ | ------------------------------------------------------------ | +| capacity | {% if include.deployment == 'self-hosted' %}capacity.total |{% elsif include.deployment == 'advanced' %}capacity |{% endif %} Total storage capacity | This metric gives total storage capacity. Measurements should comply with the following rule: CockroachDB storage volumes should not be utilized more than 60% (40% free space). | | capacity.available | capacity.available | Available storage capacity | This metric gives available storage capacity. Measurements should comply with the following rule: CockroachDB storage volumes should not be utilized more than 60% (40% free space). | | capacity.used | capacity.used | Used storage capacity | This metric gives used storage capacity. Measurements should comply with the following rule: CockroachDB storage volumes should not be utilized more than 60% (40% free space). | -| storage.write-stalls | {% if include.deployment == 'self-hosted' %}storage.write.stalls |{% elsif include.deployment == 'dedicated' %}NOT AVAILABLE |{% endif %} Number of instances of intentional write stalls to backpressure incoming writes | This metric reports actual disk stall events. Ideally, investigate all reports of disk stalls. As a pratical guideline, one stall per minute is not likely to have a material impact on workload beyond an occasional increase in response time. However one stall per second should be viewed as problematic and investigated actively. It is particularly problematic if the rate persists over an extended period of time, and worse, if it is increasing. 
| +| storage.write-stalls | {% if include.deployment == 'self-hosted' %}storage.write.stalls |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of instances of intentional write stalls to backpressure incoming writes | This metric reports actual disk stall events. Ideally, investigate all reports of disk stalls. As a practical guideline, one stall per minute is not likely to have a material impact on workload beyond an occasional increase in response time. However, one stall per second should be viewed as problematic and investigated actively. It is particularly problematic if the rate persists over an extended period of time, and worse, if it is increasing. |
By observing block cache hits and misses, you can fine-tune memory allocations in the node process for the demands of the workload. | ## Health -|
CockroachDB Metric Name
| {% if include.deployment == 'self-hosted' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb/?tab=host#metrics)
(add `cockroachdb.` prefix)
|{% elsif include.deployment == 'dedicated' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb_dedicated/#metrics)
(add `crdb_dedicated.` prefix)
|{% endif %}
Description
| Usage | -| ----------------------------------------------------- | {% if include.deployment == 'self-hosted' %}------ |{% elsif include.deployment == 'dedicated' %}---- |{% endif %} ------------------------------------------------------------ | ------------------------------------------------------------ | +|
CockroachDB Metric Name
| {% if include.deployment == 'self-hosted' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb/?tab=host#metrics)
(add `cockroachdb.` prefix)
|{% elsif include.deployment == 'advanced' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb_dedicated/#metrics)
(add `crdb_cloud.` prefix)
|{% endif %}
Description
| Usage | +| ----------------------------------------------------- | {% if include.deployment == 'self-hosted' %}------ |{% elsif include.deployment == 'advanced' %}---- |{% endif %} ------------------------------------------------------------ | ------------------------------------------------------------ | | sys.uptime | sys.uptime | Process uptime | This metric measures the length of time, in seconds, that the CockroachDB process has been running. Monitor this metric to detect events such as node restarts, which may require investigation or intervention. | -| admission.io.overload | {% if include.deployment == 'self-hosted' %}admission.io.overload |{% elsif include.deployment == 'dedicated' %}NOT AVAILABLE |{% endif %} 1-normalized float indicating whether IO admission control considers the store as overloaded with respect to compaction out of L0 (considers sub-level and file counts). | If the value of this metric exceeds 1, then it indicates overload. You can also look at the metrics `storage.l0-num-files`, `storage.l0-sublevels` or `rocksdb.read-amplification` directly. A healthy LSM shape is defined as “read-amp < 20” and “L0-files < 1000”, looking at [cluster settings]({% link {{ page.version.version }}/cluster-settings.md %}) `admission.l0_sub_level_count_overload_threshold` and `admission.l0_file_count_overload_threshold` respectively. | -| admission.wait_durations.kv-p75 | {% if include.deployment == 'self-hosted' %}admission.wait.durations.kv |{% elsif include.deployment == 'dedicated' %}NOT AVAILABLE |{% endif %} Wait time durations for requests that waited | This metric shows if CPU utilization-based admission control feature is working effectively or potentially overaggressive. This is a latency histogram of how much delay was added to the workload due to throttling by CPU control. If observing over 100ms waits for over 5 seconds while there was excess CPU capacity available, then the admission control is overly aggressive. 
| -| admission.wait_durations.kv-stores-p75 | {% if include.deployment == 'self-hosted' %}admission.wait.durations.kv_stores |{% elsif include.deployment == 'dedicated' %}NOT AVAILABLE |{% endif %} Wait time durations for requests that waited | This metric shows if I/O utilization-based admission control feature is working effectively or potentially overaggressive. This is a latency histogram of how much delay was added to the workload due to throttling by I/O control. If observing over 100ms waits for over 5 seconds while there was excess I/O capacity available, then the admission control is overly aggressive. | -| sys.runnable.goroutines.per.cpu | {% if include.deployment == 'self-hosted' %}sys.runnable.goroutines.per_cpu |{% elsif include.deployment == 'dedicated' %}NOT AVAILABLE |{% endif %} Average number of goroutines that are waiting to run, normalized by number of cores | If this metric has a value over 30, it indicates a CPU overload. If the condition lasts a short period of time (a few seconds), the database users are likely to experience inconsistent response times. If the condition persists for an extended period of time (tens of seconds, or minutes) the cluster may start developing stability issues. Review [CPU planning]({% link {{ page.version.version }}/common-issues-to-monitor.md %}#cpu). +| admission.io.overload | {% if include.deployment == 'self-hosted' %}admission.io.overload |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} 1-normalized float indicating whether IO admission control considers the store as overloaded with respect to compaction out of L0 (considers sub-level and file counts). | If the value of this metric exceeds 1, then it indicates overload. You can also look at the metrics `storage.l0-num-files`, `storage.l0-sublevels` or `rocksdb.read-amplification` directly. 
A healthy LSM shape is defined as “read-amp < 20” and “L0-files < 1000”, looking at [cluster settings]({% link {{ page.version.version }}/cluster-settings.md %}) `admission.l0_sub_level_count_overload_threshold` and `admission.l0_file_count_overload_threshold` respectively. | +| admission.wait_durations.kv-p75 | {% if include.deployment == 'self-hosted' %}admission.wait.durations.kv |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Wait time durations for requests that waited | This metric shows if CPU utilization-based admission control feature is working effectively or potentially overaggressive. This is a latency histogram of how much delay was added to the workload due to throttling by CPU control. If observing over 100ms waits for over 5 seconds while there was excess CPU capacity available, then the admission control is overly aggressive. | +| admission.wait_durations.kv-stores-p75 | {% if include.deployment == 'self-hosted' %}admission.wait.durations.kv_stores |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Wait time durations for requests that waited | This metric shows if I/O utilization-based admission control feature is working effectively or potentially overaggressive. This is a latency histogram of how much delay was added to the workload due to throttling by I/O control. If observing over 100ms waits for over 5 seconds while there was excess I/O capacity available, then the admission control is overly aggressive. | +| sys.runnable.goroutines.per.cpu | {% if include.deployment == 'self-hosted' %}sys.runnable.goroutines.per_cpu |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Average number of goroutines that are waiting to run, normalized by number of cores | If this metric has a value over 30, it indicates a CPU overload. If the condition lasts a short period of time (a few seconds), the database users are likely to experience inconsistent response times. 
If the condition persists for an extended period of time (tens of seconds, or minutes) the cluster may start developing stability issues. Review [CPU planning]({% link {{ page.version.version }}/common-issues-to-monitor.md %}#cpu). {% if include.deployment == 'self-hosted' %} ## Network @@ -80,38 +80,38 @@ The **Usage** column explains why each metric is important to visualize in a cus ## KV distributed -|
CockroachDB Metric Name
| {% if include.deployment == 'self-hosted' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb/?tab=host#metrics)
(add `cockroachdb.` prefix)
|{% elsif include.deployment == 'dedicated' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb_dedicated/#metrics)
(add `crdb_dedicated.` prefix)
|{% endif %}
Description
| Usage | -| ----------------------------------------------------- | {% if include.deployment == 'self-hosted' %}------ |{% elsif include.deployment == 'dedicated' %}---- |{% endif %} ------------------------------------------------------------ | ------------------------------------------------------------ | -| liveness.heartbeatlatency | {% if include.deployment == 'self-hosted' %}liveness.heartbeatlatency-p90 |{% elsif include.deployment == 'dedicated' %}liveness.heartbeatlatency |{% endif %} Node liveness heartbeat latency | If this metric exceeds 1 second, it is a sign of cluster instability. | +|
CockroachDB Metric Name
| {% if include.deployment == 'self-hosted' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb/?tab=host#metrics)
(add `cockroachdb.` prefix)
|{% elsif include.deployment == 'advanced' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb_dedicated/#metrics)
(add `crdb_cloud.` prefix)
|{% endif %}
Description
| Usage | +| ----------------------------------------------------- | {% if include.deployment == 'self-hosted' %}------ |{% elsif include.deployment == 'advanced' %}---- |{% endif %} ------------------------------------------------------------ | ------------------------------------------------------------ | +| liveness.heartbeatlatency | {% if include.deployment == 'self-hosted' %}liveness.heartbeatlatency-p90 |{% elsif include.deployment == 'advanced' %}liveness.heartbeatlatency |{% endif %} Node liveness heartbeat latency | If this metric exceeds 1 second, it is a sign of cluster instability. | | liveness.livenodes | liveness.livenodes | Number of live nodes in the cluster (will be 0 if this node is not itself live) | This is a critical metric that tracks the live nodes in the cluster. | | distsender.rpc.sent.nextreplicaerror | distsender.rpc.sent.nextreplicaerror | Number of replica-addressed RPCs sent due to per-replica errors | [RPC](architecture/overview.html#overview) errors do not necessarily indicate a problem. This metric tracks remote procedure calls that return a status value other than "success". A non-success status of an RPC should not be misconstrued as a network transport issue. It is database code logic executed on another cluster node. The non-success status is a result of an orderly execution of an RPC that reports a specific logical condition. | | distsender.errors.notleaseholder | distsender.errors.notleaseholder | Number of NotLeaseHolderErrors encountered from replica-addressed RPCs | Errors of this type are normal during elastic cluster topology changes when leaseholders are actively rebalancing. They are automatically retried. However they may create occasional response time spikes. In that case, this metric may provide the explanation of the cause. | ## KV replication -|
CockroachDB Metric Name
| {% if include.deployment == 'self-hosted' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb/?tab=host#metrics)
(add `cockroachdb.` prefix)
|{% elsif include.deployment == 'dedicated' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb_dedicated/#metrics)
(add `crdb_dedicated.` prefix)
|{% endif %}
Description
| Usage | -| ----------------------------------------------------- | {% if include.deployment == 'self-hosted' %}------ |{% elsif include.deployment == 'dedicated' %}---- |{% endif %} ------------------------------------------------------------ | ------------------------------------------------------------ | +|
CockroachDB Metric Name
| {% if include.deployment == 'self-hosted' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb/?tab=host#metrics)
(add `cockroachdb.` prefix)
|{% elsif include.deployment == 'advanced' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb_dedicated/#metrics)
(add `crdb_cloud.` prefix)
|{% endif %}
Description
| Usage | +| ----------------------------------------------------- | {% if include.deployment == 'self-hosted' %}------ |{% elsif include.deployment == 'advanced' %}---- |{% endif %} ------------------------------------------------------------ | ------------------------------------------------------------ | | leases.transfers.success | leases.transfers.success | Number of successful lease transfers | A high number of [lease](architecture/replication-layer.html#leases) transfers is not a negative or positive signal, rather it is a reflection of the elastic cluster activities. For example, this metric is high during cluster topology changes. A high value is often the reason for NotLeaseHolderErrors which are normal and expected during rebalancing. Observing this metric may provide a confirmation of the cause of such errors. | -| rebalancing.queriespersecond | {% if include.deployment == 'self-hosted' %}rebalancing.queriespersecond |{% elsif include.deployment == 'dedicated' %}NOT AVAILABLE |{% endif %} Number of kv-level requests received per second by the store, considering the last 30 minutes, as used in rebalancing decisions. | This metric shows hotspots along the queries per second (QPS) dimension. It provides insights into the ongoing rebalancing activities. | +| rebalancing.queriespersecond | {% if include.deployment == 'self-hosted' %}rebalancing.queriespersecond |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of kv-level requests received per second by the store, considering the last 30 minutes, as used in rebalancing decisions. | This metric shows hotspots along the queries per second (QPS) dimension. It provides insights into the ongoing rebalancing activities. | | ranges | ranges | Number of ranges | This metric provides a measure of the scale of the data size. 
| -| replicas | {% if include.deployment == 'self-hosted' %}replicas.total |{% elsif include.deployment == 'dedicated' %}replicas |{% endif %} Number of replicas | This metric provides an essential characterization of the data distribution across cluster nodes. | +| replicas | {% if include.deployment == 'self-hosted' %}replicas.total |{% elsif include.deployment == 'advanced' %}replicas |{% endif %} Number of replicas | This metric provides an essential characterization of the data distribution across cluster nodes. | | replicas.leaseholders | replicas.leaseholders | Number of lease holders | This metric provides an essential characterization of the data processing points across cluster nodes. | | ranges.underreplicated | ranges.underreplicated | Number of ranges with fewer live replicas than the replication target | This metric is an indicator of [replication issues]({% link {{ page.version.version }}/cluster-setup-troubleshooting.md %}#replication-issues). It shows whether the cluster has data that is not conforming to resilience goals. The next step is to determine the corresponding database object, such as the table or index, of these under-replicated ranges and whether the under-replication is temporarily expected. Use the statement `SELECT table_name, index_name FROM [SHOW RANGES WITH INDEXES] WHERE range_id = {id of under-replicated range};`| | ranges.unavailable | ranges.unavailable | Number of ranges with fewer live replicas than needed for quorum | This metric is an indicator of [replication issues]({% link {{ page.version.version }}/cluster-setup-troubleshooting.md %}#replication-issues). It shows whether the cluster is unhealthy and can impact workload. If an entire range is unavailable, then it will be unable to process queries. 
| -| queue.replicate.replacedecommissioningreplica.error | {% if include.deployment == 'self-hosted' %}queue.replicate.replacedecommissioningreplica.error.count |{% elsif include.deployment == 'dedicated' %}NOT AVAILABLE |{% endif %} Number of failed decommissioning replica replacements processed by the replicate queue | Refer to [Decommission the node]({% link {{ page.version.version }}/node-shutdown.md %}?filters=decommission#decommission-the-node). | -| range.splits | {% if include.deployment == 'self-hosted' %}range.splits.total |{% elsif include.deployment == 'dedicated' %}range.splits |{% endif %} Number of range splits | This metric indicates how fast a workload is scaling up. Spikes can indicate resource hot spots since the [split heuristic is based on QPS]({% link {{ page.version.version }}/load-based-splitting.md %}#control-load-based-splitting-threshold). To understand whether hot spots are an issue and with which tables and indexes they are occurring, correlate this metric with other metrics such as CPU usage, such as `sys.cpu.combined.percent-normalized`, or use the [**Hot Ranges** page]({% link {{ page.version.version }}/ui-hot-ranges-page.md %}). | -| range.merges | {% if include.deployment == 'self-hosted' %}range.merges.count |{% elsif include.deployment == 'dedicated' %}NOT AVAILABLE |{% endif %} Number of range merges | This metric indicates how fast a workload is scaling down. Merges are Cockroach's [optimization for performance](architecture/distribution-layer.html#range-merges). This metric indicates that there have been deletes in the workload. 
| +| queue.replicate.replacedecommissioningreplica.error | {% if include.deployment == 'self-hosted' %}queue.replicate.replacedecommissioningreplica.error.count |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of failed decommissioning replica replacements processed by the replicate queue | Refer to [Decommission the node]({% link {{ page.version.version }}/node-shutdown.md %}?filters=decommission#decommission-the-node). | +| range.splits | {% if include.deployment == 'self-hosted' %}range.splits.total |{% elsif include.deployment == 'advanced' %}range.splits |{% endif %} Number of range splits | This metric indicates how fast a workload is scaling up. Spikes can indicate resource hot spots since the [split heuristic is based on QPS]({% link {{ page.version.version }}/load-based-splitting.md %}#control-load-based-splitting-threshold). To understand whether hot spots are an issue and with which tables and indexes they are occurring, correlate this metric with other metrics such as CPU usage, such as `sys.cpu.combined.percent-normalized`, or use the [**Hot Ranges** page]({% link {{ page.version.version }}/ui-hot-ranges-page.md %}). | +| range.merges | {% if include.deployment == 'self-hosted' %}range.merges.count |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of range merges | This metric indicates how fast a workload is scaling down. Merges are Cockroach's [optimization for performance](architecture/distribution-layer.html#range-merges). This metric indicates that there have been deletes in the workload. | ## SQL -|
CockroachDB Metric Name
| {% if include.deployment == 'self-hosted' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb/?tab=host#metrics)
(add `cockroachdb.` prefix)
|{% elsif include.deployment == 'dedicated' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb_dedicated/#metrics)
(add `crdb_dedicated.` prefix)
|{% endif %}
Description
| Usage | -| ----------------------------------------------------- | {% if include.deployment == 'self-hosted' %}------ |{% elsif include.deployment == 'dedicated' %}---- |{% endif %} ------------------------------------------------------------ | ------------------------------------------------------------ | +|
CockroachDB Metric Name
| {% if include.deployment == 'self-hosted' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb/?tab=host#metrics)
(add `cockroachdb.` prefix)
|{% elsif include.deployment == 'advanced' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb_dedicated/#metrics)
(add `crdb_cloud.` prefix)
|{% endif %}
Description
| Usage | +| ----------------------------------------------------- | {% if include.deployment == 'self-hosted' %}------ |{% elsif include.deployment == 'advanced' %}---- |{% endif %} ------------------------------------------------------------ | ------------------------------------------------------------ | | sql.conns | sql.conns | Number of active SQL connections | This metric shows the number of connections as well as the distribution, or balancing, of connections across cluster nodes. An imbalance can lead to nodes becoming overloaded. Review [Connection Pooling]({% link {{ page.version.version }}/connection-pooling.md %}). | -| sql.new_conns | {% if include.deployment == 'self-hosted' %}sql.new_conns.count |{% elsif include.deployment == 'dedicated' %}NOT AVAILABLE |{% endif %} Number of new connection attempts. | The rate of this metric shows how frequently new connections are being established. This can be useful in determining if a high rate of incoming new connections is causing additional load on the server due to a misconfigured application. | +| sql.new_conns | {% if include.deployment == 'self-hosted' %}sql.new_conns.count |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of new connection attempts. | The rate of this metric shows how frequently new connections are being established. This can be useful in determining if a high rate of incoming new connections is causing additional load on the server due to a misconfigured application. | | sql.txns.open | sql.txns.open | Number of currently open user SQL transactions | This metric should roughly correspond to the number of cores * 4. If this metric is consistently larger, scale out the cluster. | | sql.statements.active | sql.statements.active | Number of currently active user SQL statements | This high-level metric reflects workload volume. 
| -| sql.failure.count | {% if include.deployment == 'self-hosted' %}sql.failure |{% elsif include.deployment == 'dedicated' %}sql.failure.count |{% endif %} Number of statements resulting in a planning or runtime error | This metric is a high-level indicator of workload and application degradation with query failures. Use the [Insights page]({% link {{ page.version.version }}/ui-insights-page.md %}) to find failed executions with their error code to troubleshoot or use application-level logs, if instrumented, to determine the cause of error. | -| sql.full.scan.count | {% if include.deployment == 'self-hosted' %}sql.full.scan |{% elsif include.deployment == 'dedicated' %}sql.full.scan.count |{% endif %} Number of full table or index scans | This metric is a high-level indicator of potentially suboptimal query plans in the workload that may require index tuning and maintenance. To identify the [statements with a full table scan]({% link {{ page.version.version }}/performance-recipes.md %}#statements-with-full-table-scans), use `SHOW FULL TABLE SCAN` or the [**SQL Activity Statements** page]({% link {{ page.version.version }}/ui-statements-page.md %}) with the corresponding metric time frame. The **Statements** page also includes [explain plans]({% link {{ page.version.version }}/ui-statements-page.md %}#explain-plans) and [index recommendations]({% link {{ page.version.version }}/ui-statements-page.md %}#insights). Not all full scans are necessarily bad especially over smaller tables. | +| sql.failure.count | {% if include.deployment == 'self-hosted' %}sql.failure |{% elsif include.deployment == 'advanced' %}sql.failure.count |{% endif %} Number of statements resulting in a planning or runtime error | This metric is a high-level indicator of workload and application degradation with query failures. 
Use the [Insights page]({% link {{ page.version.version }}/ui-insights-page.md %}) to find failed executions with their error code to troubleshoot or use application-level logs, if instrumented, to determine the cause of error. | +| sql.full.scan.count | {% if include.deployment == 'self-hosted' %}sql.full.scan |{% elsif include.deployment == 'advanced' %}sql.full.scan.count |{% endif %} Number of full table or index scans | This metric is a high-level indicator of potentially suboptimal query plans in the workload that may require index tuning and maintenance. To identify the [statements with a full table scan]({% link {{ page.version.version }}/performance-recipes.md %}#statements-with-full-table-scans), use `SHOW FULL TABLE SCAN` or the [**SQL Activity Statements** page]({% link {{ page.version.version }}/ui-statements-page.md %}) with the corresponding metric time frame. The **Statements** page also includes [explain plans]({% link {{ page.version.version }}/ui-statements-page.md %}#explain-plans) and [index recommendations]({% link {{ page.version.version }}/ui-statements-page.md %}#insights). Not all full scans are necessarily bad especially over smaller tables. | | sql.insert.count | sql.insert.count | Number of SQL INSERT statements successfully executed | This high-level metric reflects workload volume. Monitor this metric to identify abnormal application behavior or patterns over time. If abnormal patterns emerge, apply the metric's time range to the [**SQL Activity** pages]({% link {{ page.version.version }}/monitoring-and-alerting.md %}#sql-activity-pages) to investigate interesting outliers or patterns. For example, on the [**Transactions** page]({% link {{ page.version.version }}/ui-transactions-page.md %}) and the [**Statements** page]({% link {{ page.version.version }}/ui-statements-page.md %}), sort on the Execution Count column. 
To find problematic sessions, on the [**Sessions** page]({% link {{ page.version.version }}/ui-sessions-page.md %}), sort on the Transaction Count column. Find the sessions with high transaction counts and trace back to a user or application. | | sql.update.count | sql.update.count | Number of SQL UPDATE statements successfully executed | This high-level metric reflects workload volume. Monitor this metric to identify abnormal application behavior or patterns over time. If abnormal patterns emerge, apply the metric's time range to the [**SQL Activity** pages]({% link {{ page.version.version }}/monitoring-and-alerting.md %}#sql-activity-pages) to investigate interesting outliers or patterns. For example, on the [**Transactions** page]({% link {{ page.version.version }}/ui-transactions-page.md %}) and the [**Statements** page]({% link {{ page.version.version }}/ui-statements-page.md %}), sort on the Execution Count column. To find problematic sessions, on the [**Sessions** page]({% link {{ page.version.version }}/ui-sessions-page.md %}), sort on the Transaction Count column. Find the sessions with high transaction counts and trace back to a user or application. | | sql.delete.count | sql.delete.count | Number of SQL DELETE statements successfully executed | This high-level metric reflects workload volume. Monitor this metric to identify abnormal application behavior or patterns over time. If abnormal patterns emerge, apply the metric's time range to the [**SQL Activity** pages]({% link {{ page.version.version }}/monitoring-and-alerting.md %}#sql-activity-pages) to investigate interesting outliers or patterns. For example, on the [**Transactions** page]({% link {{ page.version.version }}/ui-transactions-page.md %}) and the [**Statements** page]({% link {{ page.version.version }}/ui-statements-page.md %}), sort on the Execution Count column. 
To find problematic sessions, on the [**Sessions** page]({% link {{ page.version.version }}/ui-sessions-page.md %}), sort on the Transaction Count column. Find the sessions with high transaction counts and trace back to a user or application. | @@ -123,62 +123,62 @@ The **Usage** column explains why each metric is important to visualize in a cus | sql.txn.abort.count | sql.txn.abort.count | Number of SQL transaction abort errors | This high-level metric reflects workload performance. A persistently high number of SQL transaction abort errors may negatively impact the workload performance and needs to be investigated. | | sql.service.latency-p90, sql.service.latency-p99 | sql.service.latency | Latency of SQL request execution | These high-level metrics reflect workload performance. Monitor these metrics to understand latency over time. If abnormal patterns emerge, apply the metric's time range to the [**SQL Activity** pages]({% link {{ page.version.version }}/monitoring-and-alerting.md %}#sql-activity-pages) to investigate interesting outliers or patterns. The [**Statements page**]({% link {{ page.version.version }}/ui-statements-page.md %}) has P90 Latency and P99 latency columns to enable correlation with this metric. | | sql.txn.latency-p90, sql.txn.latency-p99 | sql.txn.latency | Latency of SQL transactions | These high-level metrics provide a latency histogram of all executed SQL transactions. These metrics provide an overview of the current SQL workload. | -| txnwaitqueue.deadlocks_total | {% if include.deployment == 'self-hosted' %}txnwaitqueue.deadlocks.count |{% elsif include.deployment == 'dedicated' %}NOT AVAILABLE |{% endif %} Number of deadlocks detected by the transaction wait queue | Alert on this metric if its value is greater than zero, especially if transaction throughput is lower than expected. Applications should be able to detect and recover from deadlock errors. 
However, transaction performance and throughput can be maximized if the application logic avoids deadlock conditions in the first place, for example, by keeping transactions as short as possible. | -| sql.distsql.contended_queries.count | {% if include.deployment == 'self-hosted' %}sql.distsql.contended.queries |{% elsif include.deployment == 'dedicated' %} sql.distsql.contended.queries |{% endif %} Number of SQL queries that experienced contention | This metric is incremented whenever there is a non-trivial amount of contention experienced by a statement whether read-write or write-write conflicts. Monitor this metric to correlate possible workload performance issues to contention conflicts. | +| txnwaitqueue.deadlocks_total | {% if include.deployment == 'self-hosted' %}txnwaitqueue.deadlocks.count |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of deadlocks detected by the transaction wait queue | Alert on this metric if its value is greater than zero, especially if transaction throughput is lower than expected. Applications should be able to detect and recover from deadlock errors. However, transaction performance and throughput can be maximized if the application logic avoids deadlock conditions in the first place, for example, by keeping transactions as short as possible. | +| sql.distsql.contended_queries.count | {% if include.deployment == 'self-hosted' %}sql.distsql.contended.queries |{% elsif include.deployment == 'advanced' %} sql.distsql.contended.queries |{% endif %} Number of SQL queries that experienced contention | This metric is incremented whenever there is a non-trivial amount of contention experienced by a statement whether read-write or write-write conflicts. Monitor this metric to correlate possible workload performance issues to contention conflicts. 
| | sql.conn.latency-p90, sql.conn.latency-p99 | sql.conn.latency | Latency to establish and authenticate a SQL connection | These metrics characterize the database connection latency which can affect the application performance, for example, by having slow startup times. | | txn.restarts.serializable | txn.restarts.serializable | Number of restarts due to a forwarded commit timestamp and isolation=SERIALIZABLE | This metric is one measure of the impact of contention conflicts on workload performance. For guidance on contention conflicts, review [transaction contention best practices]({% link {{ page.version.version }}/performance-best-practices-overview.md %}#transaction-contention) and [performance tuning recipes]({% link {{ page.version.version }}/performance-recipes.md %}#transaction-contention). Tens of restarts per minute may be a high value, a signal of an elevated degree of contention in the workload, which should be investigated. | | txn.restarts.writetooold | txn.restarts.writetooold | Number of restarts due to a concurrent writer committing first | This metric is one measure of the impact of contention conflicts on workload performance. For guidance on contention conflicts, review [transaction contention best practices]({% link {{ page.version.version }}/performance-best-practices-overview.md %}#transaction-contention) and [performance tuning recipes]({% link {{ page.version.version }}/performance-recipes.md %}#transaction-contention). Tens of restarts per minute may be a high value, a signal of an elevated degree of contention in the workload, which should be investigated. | -| txn.restarts.writetoooldmulti | {% if include.deployment == 'self-hosted' %}txn.restarts.writetoooldmulti.count |{% elsif include.deployment == 'dedicated' %}NOT AVAILABLE |{% endif %} Number of restarts due to multiple concurrent writers committing first | This metric is one measure of the impact of contention conflicts on workload performance. 
For guidance on contention conflicts, review [transaction contention best practices]({% link {{ page.version.version }}/performance-best-practices-overview.md %}#transaction-contention) and [performance tuning recipes]({% link {{ page.version.version }}/performance-recipes.md %}#transaction-contention). Tens of restarts per minute may be a high value, a signal of an elevated degree of contention in the workload, which should be investigated. | -| txn.restarts.unknown | {% if include.deployment == 'self-hosted' %}txn.restarts.unknown.count |{% elsif include.deployment == 'dedicated' %}NOT AVAILABLE |{% endif %} Number of restarts due to a unknown reasons | This metric is one measure of the impact of contention conflicts on workload performance. For guidance on contention conflicts, review [transaction contention best practices]({% link {{ page.version.version }}/performance-best-practices-overview.md %}#transaction-contention) and [performance tuning recipes]({% link {{ page.version.version }}/performance-recipes.md %}#transaction-contention). Tens of restarts per minute may be a high value, a signal of an elevated degree of contention in the workload, which should be investigated. | -| txn.restarts.txnpush | {% if include.deployment == 'self-hosted' %}txn.restarts.txnpush.count |{% elsif include.deployment == 'dedicated' %}NOT AVAILABLE |{% endif %} Number of restarts due to a transaction push failure | This metric is one measure of the impact of contention conflicts on workload performance. For guidance on contention conflicts, review [transaction contention best practices]({% link {{ page.version.version }}/performance-best-practices-overview.md %}#transaction-contention) and [performance tuning recipes]({% link {{ page.version.version }}/performance-recipes.md %}#transaction-contention). Tens of restarts per minute may be a high value, a signal of an elevated degree of contention in the workload, which should be investigated. 
| -| txn.restarts.txnaborted | {% if include.deployment == 'self-hosted' %}txn.restarts.txnaborted.count |{% elsif include.deployment == 'dedicated' %}NOT AVAILABLE |{% endif %} Number of restarts due to an abort by a concurrent transaction | The errors tracked by this metric are generally due to deadlocks. Deadlocks can often be prevented with a considered transaction design. Identify the conflicting transactions involved in the deadlocks, then, if possible, redesign the business logic implementation prone to deadlocks. | +| txn.restarts.writetoooldmulti | {% if include.deployment == 'self-hosted' %}txn.restarts.writetoooldmulti.count |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of restarts due to multiple concurrent writers committing first | This metric is one measure of the impact of contention conflicts on workload performance. For guidance on contention conflicts, review [transaction contention best practices]({% link {{ page.version.version }}/performance-best-practices-overview.md %}#transaction-contention) and [performance tuning recipes]({% link {{ page.version.version }}/performance-recipes.md %}#transaction-contention). Tens of restarts per minute may be a high value, a signal of an elevated degree of contention in the workload, which should be investigated. | +| txn.restarts.unknown | {% if include.deployment == 'self-hosted' %}txn.restarts.unknown.count |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of restarts due to unknown reasons | This metric is one measure of the impact of contention conflicts on workload performance. For guidance on contention conflicts, review [transaction contention best practices]({% link {{ page.version.version }}/performance-best-practices-overview.md %}#transaction-contention) and [performance tuning recipes]({% link {{ page.version.version }}/performance-recipes.md %}#transaction-contention). 
Tens of restarts per minute may be a high value, a signal of an elevated degree of contention in the workload, which should be investigated. | +| txn.restarts.txnpush | {% if include.deployment == 'self-hosted' %}txn.restarts.txnpush.count |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of restarts due to a transaction push failure | This metric is one measure of the impact of contention conflicts on workload performance. For guidance on contention conflicts, review [transaction contention best practices]({% link {{ page.version.version }}/performance-best-practices-overview.md %}#transaction-contention) and [performance tuning recipes]({% link {{ page.version.version }}/performance-recipes.md %}#transaction-contention). Tens of restarts per minute may be a high value, a signal of an elevated degree of contention in the workload, which should be investigated. | +| txn.restarts.txnaborted | {% if include.deployment == 'self-hosted' %}txn.restarts.txnaborted.count |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of restarts due to an abort by a concurrent transaction | The errors tracked by this metric are generally due to deadlocks. Deadlocks can often be prevented with a considered transaction design. Identify the conflicting transactions involved in the deadlocks, then, if possible, redesign the business logic implementation prone to deadlocks. | ## Table Statistics -|
CockroachDB Metric Name
| {% if include.deployment == 'self-hosted' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb/?tab=host#metrics)
(add `cockroachdb.` prefix)
|{% elsif include.deployment == 'dedicated' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb_dedicated/#metrics)
(add `crdb_dedicated.` prefix)
|{% endif %}
Description
| Usage | -| ----------------------------------------------------- | {% if include.deployment == 'self-hosted' %}------ |{% elsif include.deployment == 'dedicated' %}---- |{% endif %} ------------------------------------------------------------ | ------------------------------------------------------------ | -| jobs.auto_create_stats.resume_failed | {% if include.deployment == 'self-hosted' %}jobs.auto.create.stats.resume_failed.count |{% elsif include.deployment == 'dedicated' %}NOT AVAILABLE |{% endif %} Number of auto_create_stats jobs which failed with a non-retryable error | This metric is a high-level indicator that automatically generated [table statistics]({% link {{ page.version.version }}/cost-based-optimizer.md %}#table-statistics) is failing. Failed statistic creation can lead to the query optimizer running with stale statistics. Stale statistics can cause suboptimal query plans to be selected leading to poor query performance. | -| jobs.auto_create_stats.currently_running | {% if include.deployment == 'self-hosted' %}jobs.auto.create.stats.currently_running |{% elsif include.deployment == 'dedicated' %}NOT AVAILABLE |{% endif %} Number of auto_create_stats jobs currently running | This metric tracks the number of active automatically generated statistics jobs that could also be consuming resources. Ensure that foreground SQL traffic is not impacted by correlating this metric with SQL latency and query volume metrics. | -| jobs.auto_create_stats.currently_paused | {% if include.deployment == 'self-hosted' %}jobs.auto.create.stats.currently_paused |{% elsif include.deployment == 'dedicated' %}NOT AVAILABLE |{% endif %} Number of auto_create_stats jobs currently considered Paused | This metric is a high-level indicator that automatically generated statistics jobs are paused which can lead to the query optimizer running with stale statistics. Stale statistics can cause suboptimal query plans to be selected leading to poor query performance. 
| -| jobs.create_stats.currently_running | {% if include.deployment == 'self-hosted' %}jobs.create.stats.currently_running |{% elsif include.deployment == 'dedicated' %}NOT AVAILABLE |{% endif %} Number of create_stats jobs currently running | This metric tracks the number of active create statistics jobs that may be consuming resources. Ensure that foreground SQL traffic is not impacted by correlating this metric with SQL latency and query volume metrics. | +|
CockroachDB Metric Name
| {% if include.deployment == 'self-hosted' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb/?tab=host#metrics)
(add `cockroachdb.` prefix)
|{% elsif include.deployment == 'advanced' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb_dedicated/#metrics)
(add `crdb_cloud.` prefix)
|{% endif %}
Description
| Usage | +| ----------------------------------------------------- | {% if include.deployment == 'self-hosted' %}------ |{% elsif include.deployment == 'advanced' %}---- |{% endif %} ------------------------------------------------------------ | ------------------------------------------------------------ | +| jobs.auto_create_stats.resume_failed | {% if include.deployment == 'self-hosted' %}jobs.auto.create.stats.resume_failed.count |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of auto_create_stats jobs which failed with a non-retryable error | This metric is a high-level indicator that automatically generated [table statistics]({% link {{ page.version.version }}/cost-based-optimizer.md %}#table-statistics) is failing. Failed statistic creation can lead to the query optimizer running with stale statistics. Stale statistics can cause suboptimal query plans to be selected leading to poor query performance. | +| jobs.auto_create_stats.currently_running | {% if include.deployment == 'self-hosted' %}jobs.auto.create.stats.currently_running |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of auto_create_stats jobs currently running | This metric tracks the number of active automatically generated statistics jobs that could also be consuming resources. Ensure that foreground SQL traffic is not impacted by correlating this metric with SQL latency and query volume metrics. | +| jobs.auto_create_stats.currently_paused | {% if include.deployment == 'self-hosted' %}jobs.auto.create.stats.currently_paused |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of auto_create_stats jobs currently considered Paused | This metric is a high-level indicator that automatically generated statistics jobs are paused which can lead to the query optimizer running with stale statistics. Stale statistics can cause suboptimal query plans to be selected leading to poor query performance. 
| +| jobs.create_stats.currently_running | {% if include.deployment == 'self-hosted' %}jobs.create.stats.currently_running |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of create_stats jobs currently running | This metric tracks the number of active create statistics jobs that may be consuming resources. Ensure that foreground SQL traffic is not impacted by correlating this metric with SQL latency and query volume metrics. | ## Backup and Restore -|
CockroachDB Metric Name
| {% if include.deployment == 'self-hosted' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb/?tab=host#metrics)
(add `cockroachdb.` prefix)
|{% elsif include.deployment == 'dedicated' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb_dedicated/#metrics)
(add `crdb_dedicated.` prefix)
|{% endif %}
Description
| Usage | -| ----------------------------------------------------- | {% if include.deployment == 'self-hosted' %}------ |{% elsif include.deployment == 'dedicated' %}---- |{% endif %} ------------------------------------------------------------ | ------------------------------------------------------------ | -| jobs.backup.currently_running | {% if include.deployment == 'self-hosted' %}jobs.backup.currently_running |{% elsif include.deployment == 'dedicated' %}NOT AVAILABLE |{% endif %} Number of backup jobs currently running | See Description. | -| jobs.backup.currently_paused | {% if include.deployment == 'self-hosted' %}jobs.backup.currently_paused |{% elsif include.deployment == 'dedicated' %}NOT AVAILABLE |{% endif %} Number of backup jobs currently considered Paused | Monitor and alert on this metric to safeguard against an inadvertent operational error of leaving a backup job in a paused state for an extended period of time. In functional areas, a paused job can hold resources or have concurrency impact or some other negative consequence. Paused backup may break the [recovery point objective (RPO)]({% link {{ page.version.version }}/backup.md %}#performance). | -| schedules.BACKUP.failed | {% if include.deployment == 'self-hosted' %}schedules.backup.failed |{% elsif include.deployment == 'dedicated' %}NOT AVAILABLE |{% endif %} Number of BACKUP jobs failed | Monitor this metric and investigate backup job failures. | -| schedules.BACKUP.last-completed-time | {% if include.deployment == 'self-hosted' %}schedules.backup.last_completed_time |{% elsif include.deployment == 'dedicated' %}NOT AVAILABLE |{% endif %} The unix timestamp of the most recently completed backup by a schedule specified as maintaining this metric | Monitor this metric to know that backups are meeting the [recovery point objective (RPO)]({% link {{ page.version.version }}/backup.md %}#performance). | +|
CockroachDB Metric Name
| {% if include.deployment == 'self-hosted' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb/?tab=host#metrics)
(add `cockroachdb.` prefix)
|{% elsif include.deployment == 'advanced' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb_dedicated/#metrics)
(add `crdb_cloud.` prefix)
|{% endif %}
Description
| Usage | +| ----------------------------------------------------- | {% if include.deployment == 'self-hosted' %}------ |{% elsif include.deployment == 'advanced' %}---- |{% endif %} ------------------------------------------------------------ | ------------------------------------------------------------ | +| jobs.backup.currently_running | {% if include.deployment == 'self-hosted' %}jobs.backup.currently_running |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of backup jobs currently running | See Description. | +| jobs.backup.currently_paused | {% if include.deployment == 'self-hosted' %}jobs.backup.currently_paused |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of backup jobs currently considered Paused | Monitor and alert on this metric to safeguard against an inadvertent operational error of leaving a backup job in a paused state for an extended period of time. In functional areas, a paused job can hold resources or have concurrency impact or some other negative consequence. Paused backup may break the [recovery point objective (RPO)]({% link {{ page.version.version }}/backup.md %}#performance). | +| schedules.BACKUP.failed | {% if include.deployment == 'self-hosted' %}schedules.backup.failed |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of BACKUP jobs failed | Monitor this metric and investigate backup job failures. | +| schedules.BACKUP.last-completed-time | {% if include.deployment == 'self-hosted' %}schedules.backup.last_completed_time |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} The Unix timestamp of the most recently completed backup by a schedule specified as maintaining this metric | Monitor this metric to ensure that backups are meeting the [recovery point objective (RPO)]({% link {{ page.version.version }}/disaster-recovery-overview.md %}#resilience-strategy). 
Each node exports the time that it last completed a backup on behalf of the schedule. If a node is restarted, it will report `0` until it completes a backup. If all nodes are restarted, `max()` is `0` until a node completes a backup.

To make use of this metric, first, from each node, take the maximum over a rolling window equal to or greater than the backup frequency, and then take the maximum of those values across nodes. For example with a backup frequency of 60 minutes, monitor `time() - max_across_nodes(max_over_time(schedules_BACKUP_last_completed_time, 60min))`. | ## Changefeeds If [changefeeds]({% link {{ page.version.version }}/change-data-capture-overview.md %}) are created in a CockroachDB cluster, monitor these additional metrics in your custom dashboards: -|
CockroachDB Metric Name
| {% if include.deployment == 'self-hosted' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb/?tab=host#metrics)
(add `cockroachdb.` prefix)
|{% elsif include.deployment == 'dedicated' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb_dedicated/#metrics)
(add `crdb_dedicated.` prefix)
|{% endif %}
Description
| Usage | -| ----------------------------------------------------- | {% if include.deployment == 'self-hosted' %}------ |{% elsif include.deployment == 'dedicated' %}---- |{% endif %} ------------------------------------------------------------ | ------------------------------------------------------------ | +|
CockroachDB Metric Name
| {% if include.deployment == 'self-hosted' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb/?tab=host#metrics)
(add `cockroachdb.` prefix)
|{% elsif include.deployment == 'advanced' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb_dedicated/#metrics)
(add `crdb_cloud.` prefix)
|{% endif %}
Description
| Usage | +| ----------------------------------------------------- | {% if include.deployment == 'self-hosted' %}------ |{% elsif include.deployment == 'advanced' %}---- |{% endif %} ------------------------------------------------------------ | ------------------------------------------------------------ | | changefeed.running | changefeed.running | Number of currently running changefeeds, including sinkless | This metric tracks the total number of all running changefeeds. | -| jobs.changefeed.currently_paused | {% if include.deployment == 'self-hosted' %}jobs.changefeed.currently_paused |{% elsif include.deployment == 'dedicated' %}NOT AVAILABLE |{% endif %} Number of changefeed jobs currently considered Paused | Monitor and alert on this metric to safeguard against an inadvertent operational error of leaving a changefeed job in a paused state for an extended period of time. Changefeed jobs should not be paused for a long time because the [protected timestamp prevents garbage collection]({% link {{ page.version.version }}/monitor-and-debug-changefeeds.md %}#protected-timestamp-and-garbage-collection-monitoring). | +| jobs.changefeed.currently_paused | {% if include.deployment == 'self-hosted' %}jobs.changefeed.currently_paused |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of changefeed jobs currently considered Paused | Monitor and alert on this metric to safeguard against an inadvertent operational error of leaving a changefeed job in a paused state for an extended period of time. Changefeed jobs should not be paused for a long time because the [protected timestamp prevents garbage collection]({% link {{ page.version.version }}/monitor-and-debug-changefeeds.md %}#protected-timestamp-and-garbage-collection-monitoring). | | changefeed.failures | changefeed.failures | Total number of changefeed jobs which have failed | This metric tracks the permanent changefeed job failures that the jobs system will not try to restart. 
Any increase in this counter should be investigated. An alert on this metric is recommended. | | changefeed.error_retries | changefeed.error.retries | Total retryable errors encountered by all changefeeds | This metric tracks transient changefeed errors. Alert on "too many" errors, such as 50 retries in 15 minutes. For example, during a rolling upgrade this counter will increase because the changefeed jobs will restart following node restarts. There is an exponential backoff, up to 10 minutes. But if there is no rolling upgrade in process or other cluster maintenance, and the error rate is high, investigate the changefeed job. | changefeed.emitted_messages | changefeed.emitted.messages | Messages emitted by all feeds | This metric provides a useful context when assessing the state of changefeeds. This metric characterizes the rate of changes being streamed from the CockroachDB cluster. | -| changefeed.emitted_bytes | {% if include.deployment == 'self-hosted' %}changefeed.emitted_bytes.count |{% elsif include.deployment == 'dedicated' %}NOT AVAILABLE |{% endif %} Bytes emitted by all feeds | This metric provides a useful context when assessing the state of changefeeds. This metric characterizes the throughput bytes being streamed from the CockroachDB cluster. | +| changefeed.emitted_bytes | {% if include.deployment == 'self-hosted' %}changefeed.emitted_bytes.count |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Bytes emitted by all feeds | This metric provides a useful context when assessing the state of changefeeds. This metric characterizes the throughput bytes being streamed from the CockroachDB cluster. | | changefeed.commit_latency | changefeed.commit.latency | The difference between the event MVCC timestamp and the time it was acknowledged by the downstream sink. If the sink batches events, then the difference between the oldest event in the batch and acknowledgement is recorded. 
Latency during backfill is excluded.| This metric provides a useful context when assessing the state of changefeeds. This metric characterizes the end-to-end lag between a committed change and that change applied at the destination. | -| jobs.changefeed.protected_age_sec | {% if include.deployment == 'self-hosted' %}jobs.changefeed.protected_age_sec |{% elsif include.deployment == 'dedicated' %}NOT AVAILABLE |{% endif %} The age of the oldest PTS record protected by changefeed jobs | [Changefeeds use protected timestamps to protect the data from being garbage collected]({% link {{ page.version.version }}/monitor-and-debug-changefeeds.md %}#protected-timestamp-and-garbage-collection-monitoring). Ensure the protected timestamp age does not significantly exceed the [GC TTL zone configuration]({% link {{ page.version.version }}/configure-replication-zones.md %}#replication-zone-variables). Alert on this metric if the protected timestamp age is greater than 3 times the GC TTL. | +| jobs.changefeed.protected_age_sec | {% if include.deployment == 'self-hosted' %}jobs.changefeed.protected_age_sec |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} The age of the oldest PTS record protected by changefeed jobs | [Changefeeds use protected timestamps to protect the data from being garbage collected]({% link {{ page.version.version }}/monitor-and-debug-changefeeds.md %}#protected-timestamp-and-garbage-collection-monitoring). Ensure the protected timestamp age does not significantly exceed the [GC TTL zone configuration]({% link {{ page.version.version }}/configure-replication-zones.md %}#replication-zone-variables). Alert on this metric if the protected timestamp age is greater than 3 times the GC TTL. | ## Row-Level TTL If [Row-Level TTL]({% link {{ page.version.version }}/row-level-ttl.md %}) is configured for any table in a CockroachDB cluster, monitor these additional metrics in your custom dashboards: -|
CockroachDB Metric Name
| {% if include.deployment == 'self-hosted' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb/?tab=host#metrics)
(add `cockroachdb.` prefix)
|{% elsif include.deployment == 'dedicated' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb_dedicated/#metrics)
(add `crdb_dedicated.` prefix)
|{% endif %}
Description
| Usage | -| ----------------------------------------------------- | {% if include.deployment == 'self-hosted' %}------ |{% elsif include.deployment == 'dedicated' %}---- |{% endif %} ------------------------------------------------------------ | ------------------------------------------------------------ | -| jobs.row_level_ttl.resume_completed | {% if include.deployment == 'self-hosted' %}jobs.row.level.ttl.resume_completed.count |{% elsif include.deployment == 'dedicated' %}NOT AVAILABLE |{% endif %} Number of row_level_ttl jobs which successfully resumed to completion | If Row Level TTL is enabled, this metric should be nonzero and correspond to the `ttl_cron` setting that was chosen. If this metric is zero, it means the job is not running | -| jobs.row_level_ttl.resume_failed | {% if include.deployment == 'self-hosted' %}jobs.row.level.ttl.resume_failed.count |{% elsif include.deployment == 'dedicated' %}NOT AVAILABLE |{% endif %} Number of row_level_ttl jobs which failed with a non-retryable error | This metric should remain at zero. Repeated errors means the Row Level TTL job is not deleting data. | -| jobs.row_level_ttl.rows_selected | {% if include.deployment == 'self-hosted' %}jobs.row.level.ttl.rows_selected.count |{% elsif include.deployment == 'dedicated' %}NOT AVAILABLE |{% endif %} Number of rows selected for deletion by the row level TTL job. | Correlate this metric with the metric `jobs.row_level_ttl.rows_deleted` to ensure all the rows that should be deleted are actually getting deleted. | -| jobs.row_level_ttl.rows_deleted | {% if include.deployment == 'self-hosted' %}jobs.row.level.ttl.rows_deleted.count |{% elsif include.deployment == 'dedicated' %}NOT AVAILABLE |{% endif %} Number of rows deleted by the row level TTL job. | Correlate this metric with the metric `jobs.row_level_ttl.rows_selected` to ensure all the rows that should be deleted are actually getting deleted. 
| -| jobs.row_level_ttl.currently_paused | {% if include.deployment == 'self-hosted' %}jobs.row.level.ttl.currently_paused |{% elsif include.deployment == 'dedicated' %}NOT AVAILABLE |{% endif %} Number of row_level_ttl jobs currently considered Paused | Monitor this metric to ensure the Row Level TTL job does not remain paused inadvertently for an extended period. | -| jobs.row_level_ttl.currently_running | {% if include.deployment == 'self-hosted' %}jobs.row.level.ttl.currently_running |{% elsif include.deployment == 'dedicated' %}NOT AVAILABLE |{% endif %} Number of row_level_ttl jobs currently running | Monitor this metric to ensure there are not too many Row Level TTL jobs running at the same time. Generally, this metric should be in the low single digits. | -| schedules.scheduled-row-level-ttl-executor.failed | {% if include.deployment == 'self-hosted' %}schedules.scheduled.row.level.ttl.executor_failed.count |{% elsif include.deployment == 'dedicated' %}NOT AVAILABLE |{% endif %} Number of scheduled-row-level-ttl-executor jobs failed | Monitor this metric to ensure the Row Level TTL job is running. If it is non-zero, it means the job could not be created. | +|
CockroachDB Metric Name
| {% if include.deployment == 'self-hosted' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb/?tab=host#metrics)
(add `cockroachdb.` prefix)
|{% elsif include.deployment == 'advanced' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb_dedicated/#metrics)
(add `crdb_cloud.` prefix)
|{% endif %}
Description
| Usage | +| ----------------------------------------------------- | {% if include.deployment == 'self-hosted' %}------ |{% elsif include.deployment == 'advanced' %}---- |{% endif %} ------------------------------------------------------------ | ------------------------------------------------------------ | +| jobs.row_level_ttl.resume_completed | {% if include.deployment == 'self-hosted' %}jobs.row.level.ttl.resume_completed.count |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of row_level_ttl jobs which successfully resumed to completion | If Row Level TTL is enabled, this metric should be nonzero and correspond to the `ttl_cron` setting that was chosen. If this metric is zero, it means the job is not running | +| jobs.row_level_ttl.resume_failed | {% if include.deployment == 'self-hosted' %}jobs.row.level.ttl.resume_failed.count |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of row_level_ttl jobs which failed with a non-retryable error | This metric should remain at zero. Repeated errors means the Row Level TTL job is not deleting data. | +| jobs.row_level_ttl.rows_selected | {% if include.deployment == 'self-hosted' %}jobs.row.level.ttl.rows_selected.count |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of rows selected for deletion by the row level TTL job. | Correlate this metric with the metric `jobs.row_level_ttl.rows_deleted` to ensure all the rows that should be deleted are actually getting deleted. | +| jobs.row_level_ttl.rows_deleted | {% if include.deployment == 'self-hosted' %}jobs.row.level.ttl.rows_deleted.count |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of rows deleted by the row level TTL job. | Correlate this metric with the metric `jobs.row_level_ttl.rows_selected` to ensure all the rows that should be deleted are actually getting deleted. 
| +| jobs.row_level_ttl.currently_paused | {% if include.deployment == 'self-hosted' %}jobs.row.level.ttl.currently_paused |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of row_level_ttl jobs currently considered Paused | Monitor this metric to ensure the Row Level TTL job does not remain paused inadvertently for an extended period. | +| jobs.row_level_ttl.currently_running | {% if include.deployment == 'self-hosted' %}jobs.row.level.ttl.currently_running |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of row_level_ttl jobs currently running | Monitor this metric to ensure there are not too many Row Level TTL jobs running at the same time. Generally, this metric should be in the low single digits. | +| schedules.scheduled-row-level-ttl-executor.failed | {% if include.deployment == 'self-hosted' %}schedules.scheduled.row.level.ttl.executor_failed.count |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of scheduled-row-level-ttl-executor jobs failed | Monitor this metric to ensure the Row Level TTL job is running. If it is non-zero, it means the job could not be created. | | jobs.row_level_ttl.span_total_duration | NOT AVAILABLE | Duration for processing a span during row level TTL. | See Description. | | jobs.row_level_ttl.select_duration | NOT AVAILABLE | Duration for select requests during row level TTL. | See Description. | | jobs.row_level_ttl.delete_duration | NOT AVAILABLE | Duration for delete requests during row level TTL. | See Description. 
| diff --git a/src/current/_includes/v24.2/known-limitations/aost-limitations.md b/src/current/_includes/v24.2/known-limitations/aost-limitations.md new file mode 100644 index 00000000000..811c884d08d --- /dev/null +++ b/src/current/_includes/v24.2/known-limitations/aost-limitations.md @@ -0,0 +1 @@ +CockroachDB does not support placeholders in {% if page.name == "as-of-system-time.md" %}`AS OF SYSTEM TIME`{% else %}[`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}){% endif %}. The time value must be a constant value embedded in the SQL string. [#30955](https://github.com/cockroachdb/cockroach/issues/30955) \ No newline at end of file diff --git a/src/current/_includes/v24.2/known-limitations/create-statistics-aost-limitation.md b/src/current/_includes/v24.2/known-limitations/create-statistics-aost-limitation.md new file mode 100644 index 00000000000..09f86f51c48 --- /dev/null +++ b/src/current/_includes/v24.2/known-limitations/create-statistics-aost-limitation.md @@ -0,0 +1 @@ +The `ANALYZE` alias {% if page.name != "create-statistics.md" %}of [`CREATE STATISTICS`]({% link {{ page.version.version }}/create-statistics.md %}){% endif %} does not support specifying an {% if page.name == "as-of-system-time.md" %}`AS OF SYSTEM TIME`{% else %}[`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}){% endif %} timestamp. `ANALYZE` statements use `AS OF SYSTEM TIME '-0.001ms'` automatically. For more control over the statistics interval, use the `CREATE STATISTICS` syntax instead. [#96430](https://github.com/cockroachdb/cockroach/issues/96430) \ No newline at end of file diff --git a/src/current/releases/index.md b/src/current/releases/index.md index b695880acaa..d33404c18de 100644 --- a/src/current/releases/index.md +++ b/src/current/releases/index.md @@ -293,6 +293,7 @@ To learn what’s new in this release, refer to [Feature Highlights]({% link rel {{ r.release_date }} {% comment %} Release date of the release. 
{% endcomment %} {% if r.withdrawn == true %} {% comment %} Suppress download links for withdrawn releases. {% endcomment %} Withdrawn{% comment %}covers both Intel and ARM columns {% endcomment %} + {% continue %} {% elsif r.cloud_only == true %} {% comment %} Suppress download links for Cloud-first releases {% endcomment %} {{ r.cloud_only_message_short }} {% continue %} @@ -350,6 +351,7 @@ macOS downloads are **experimental**. Experimental downloads are not yet qualifi {{ r.release_date }} {% comment %} Release date of the release. {% endcomment %} {% if r.withdrawn == true %} {% comment %} Suppress withdrawn releases. {% endcomment %} Withdrawn{% comment %}covers both Intel and ARM columns {% endcomment %} + {% continue %} {% elsif r.cloud_only == true %} {% comment %} Suppress download links for Cloud-first releases {% endcomment %} {{ r.cloud_only_message_short }} {% continue %} @@ -403,6 +405,7 @@ macOS downloads are **experimental**. Experimental downloads are not yet qualifi {{ r.release_date }} {% comment %} Release date of the release. {% endcomment %} {% if r.withdrawn == true %} {% comment %} Suppress withdrawn releases. {% endcomment %} Withdrawn{% comment %}covers both Intel and ARM columns {% endcomment %} + {% continue %} {% elsif r.cloud_only == true %} {% comment %} Suppress download links for Cloud-first releases {% endcomment %} {{ r.cloud_only_message_short }} {% continue %} @@ -543,6 +546,7 @@ macOS downloads are **experimental**. Experimental downloads are not yet qualifi {% continue %} {% elsif r.cloud_only == true %} {% comment %} Suppress download links for Cloud-first releases {% endcomment %} {{ r.cloud_only_message_short }} + {% continue %} {% else %} {% comment %} Add download links for all non-withdrawn versions. 
{% endcomment %} {% if r.source == true %} diff --git a/src/current/v22.1/alter-backup.md b/src/current/v22.1/alter-backup.md index b85067a5bcf..efa08daf699 100644 --- a/src/current/v22.1/alter-backup.md +++ b/src/current/v22.1/alter-backup.md @@ -16,7 +16,7 @@ CockroachDB supports AWS and Google Cloud KMS keys. For more detail on encrypted ## Synopsis
-{% remote_include https://raw.githubusercontent.com/cockroachdb/generated-diagrams/master/grammar_svg/alter_backup.html %} +{% remote_include https://raw.githubusercontent.com/cockroachdb/generated-diagrams/{{ page.release_info.crdb_branch_name }}/grammar_svg/alter_backup.html %}
## Parameters @@ -58,7 +58,7 @@ To add a new KMS key to the most recent backup: ALTER BACKUP LATEST IN 's3://{BUCKET NAME}?AWS_ACCESS_KEY_ID={KEY ID}&AWS_SECRET_ACCESS_KEY={SECRET ACCESS KEY}' ADD NEW_KMS = 'aws:///{new-key}?AWS_ACCESS_KEY_ID={KEY ID}&AWS_SECRET_ACCESS_KEY={SECRET ACCESS KEY}®ION={location}' WITH OLD_KMS = 'aws:///{old-key}?AWS_ACCESS_KEY_ID={KEY ID}&AWS_SECRET_ACCESS_KEY={SECRET ACCESS KEY}®ION={location}'; -~~~ +~~~ To add a new KMS key to a specific backup, issue an `ALTER BACKUP` statement that points to the full backup: @@ -67,7 +67,7 @@ To add a new KMS key to a specific backup, issue an `ALTER BACKUP` statement tha ALTER BACKUP '2022/03/23-213101.37' IN 's3://{BUCKET NAME}?AWS_ACCESS_KEY_ID={KEY ID}&AWS_SECRET_ACCESS_KEY={SECRET ACCESS KEY}' ADD NEW_KMS = 'aws:///{new-key}?AWS_ACCESS_KEY_ID={KEY ID}&AWS_SECRET_ACCESS_KEY={SECRET ACCESS KEY}®ION={location}' WITH OLD_KMS = 'aws:///{old-key}?AWS_ACCESS_KEY_ID={KEY ID}&AWS_SECRET_ACCESS_KEY={SECRET ACCESS KEY}®ION={location}'; -~~~ +~~~ To list backup directories at a collection's URI, see [`SHOW BACKUP`](show-backup.html). 
@@ -80,7 +80,7 @@ To add a new KMS key to the most recent backup: ALTER BACKUP LATEST IN 'gs://{BUCKET NAME}?AUTH=specified&CREDENTIALS={ENCODED KEY}' ADD NEW_KMS = 'gs:///projects/{project name}/locations/{location}/keyRings/{key ring name}/cryptoKeys/{new key}?AUTH=specified&CREDENTIALS={encoded key}' WITH OLD_KMS = 'gs:///projects/{project name}/locations/{location}/keyRings/{key ring name}/cryptoKeys/{old key}?AUTH=specified&CREDENTIALS={encoded key}'; -~~~ +~~~ To add a new KMS key to a specific backup, issue an `ALTER BACKUP` statement that points to the full backup: @@ -89,7 +89,7 @@ To add a new KMS key to a specific backup, issue an `ALTER BACKUP` statement tha ALTER BACKUP '2022/03/23-213101.37' IN 'gs://{BUCKET NAME}?AUTH=specified&CREDENTIALS={ENCODED KEY}' ADD NEW_KMS = 'gs:///projects/{project name}/locations/{location}/keyRings/{key ring name}/cryptoKeys/{new key}?AUTH=specified&CREDENTIALS={encoded key}' WITH OLD_KMS = 'gs:///projects/{project name}/locations/{location}/keyRings/{key ring name}/cryptoKeys/{old key}?AUTH=specified&CREDENTIALS={encoded key}'; -~~~ +~~~ To list backup directories at a collection's URI, see [`SHOW BACKUP`](show-backup.html). diff --git a/src/current/v22.1/alter-changefeed.md b/src/current/v22.1/alter-changefeed.md index a4c765a12b7..e4b64a86524 100644 --- a/src/current/v22.1/alter-changefeed.md +++ b/src/current/v22.1/alter-changefeed.md @@ -21,7 +21,7 @@ It is necessary to [**pause**](pause-job.html) a changefeed before running the ` ## Synopsis
-{% remote_include https://raw.githubusercontent.com/cockroachdb/generated-diagrams/master/grammar_svg/alter_changefeed.html %} +{% remote_include https://raw.githubusercontent.com/cockroachdb/generated-diagrams/{{ page.release_info.crdb_branch_name }}/grammar_svg/alter_changefeed.html %}
## Parameters @@ -134,7 +134,7 @@ For more information on enabling changefeeds, see [Create and Configure Changefe The output from `ALTER CHANGEFEED` will show the `CREATE CHANGEFEED` statement with the options you've defined. After modifying a changefeed with `ALTER CHANGEFEED`, the `CREATE` description will show the fully qualified table name. - For an explanation on each of these options, see the `CREATE CHANGEFEED` [options](create-changefeed.html#options). + For an explanation on each of these options, see the `CREATE CHANGEFEED` [options](create-changefeed.html#options). 1. Resume the changefeed job with `RESUME JOB`: diff --git a/src/current/v22.1/show-create-schedule.md b/src/current/v22.1/show-create-schedule.md index c0ccb200154..cb506979616 100644 --- a/src/current/v22.1/show-create-schedule.md +++ b/src/current/v22.1/show-create-schedule.md @@ -14,7 +14,7 @@ Only members of the [`admin` role](security-reference/authorization.html#admin-r ## Synopsis
-{% remote_include https://raw.githubusercontent.com/cockroachdb/generated-diagrams/master/grammar_svg/show_create_schedules.html %} +{% remote_include https://raw.githubusercontent.com/cockroachdb/generated-diagrams/{{ page.release_info.crdb_branch_name }}/grammar_svg/show_create_schedules.html %}
## Parameters diff --git a/src/current/v22.2/alter-backup-schedule.md b/src/current/v22.2/alter-backup-schedule.md index cd957a7c5e5..44b63ef5d43 100644 --- a/src/current/v22.2/alter-backup-schedule.md +++ b/src/current/v22.2/alter-backup-schedule.md @@ -25,7 +25,7 @@ To alter a backup schedule, you must be the owner of the backup schedule, i.e., ## Synopsis
-{% remote_include https://raw.githubusercontent.com/cockroachdb/generated-diagrams/master/grammar_svg/alter_backup_schedule.html %} +{% remote_include https://raw.githubusercontent.com/cockroachdb/generated-diagrams/{{ page.release_info.crdb_branch_name }}/grammar_svg/alter_backup_schedule.html %}
## Parameters diff --git a/src/current/v22.2/alter-backup.md b/src/current/v22.2/alter-backup.md index 8a92c0ffd28..535217dacc4 100644 --- a/src/current/v22.2/alter-backup.md +++ b/src/current/v22.2/alter-backup.md @@ -16,7 +16,7 @@ CockroachDB supports AWS and Google Cloud KMS keys. For more detail on encrypted ## Synopsis
-{% remote_include https://raw.githubusercontent.com/cockroachdb/generated-diagrams/master/grammar_svg/alter_backup.html %} +{% remote_include https://raw.githubusercontent.com/cockroachdb/generated-diagrams/{{ page.release_info.crdb_branch_name }}/grammar_svg/alter_backup.html %}
## Parameters @@ -58,7 +58,7 @@ To add a new KMS key to the most recent backup: ALTER BACKUP LATEST IN 's3://{BUCKET NAME}?AWS_ACCESS_KEY_ID={KEY ID}&AWS_SECRET_ACCESS_KEY={SECRET ACCESS KEY}' ADD NEW_KMS = 'aws:///{new-key}?AWS_ACCESS_KEY_ID={KEY ID}&AWS_SECRET_ACCESS_KEY={SECRET ACCESS KEY}®ION={location}' WITH OLD_KMS = 'aws:///{old-key}?AWS_ACCESS_KEY_ID={KEY ID}&AWS_SECRET_ACCESS_KEY={SECRET ACCESS KEY}®ION={location}'; -~~~ +~~~ To add a new KMS key to a specific backup, issue an `ALTER BACKUP` statement that points to the full backup: @@ -67,7 +67,7 @@ To add a new KMS key to a specific backup, issue an `ALTER BACKUP` statement tha ALTER BACKUP '2022/03/23-213101.37' IN 's3://{BUCKET NAME}?AWS_ACCESS_KEY_ID={KEY ID}&AWS_SECRET_ACCESS_KEY={SECRET ACCESS KEY}' ADD NEW_KMS = 'aws:///{new-key}?AWS_ACCESS_KEY_ID={KEY ID}&AWS_SECRET_ACCESS_KEY={SECRET ACCESS KEY}®ION={location}' WITH OLD_KMS = 'aws:///{old-key}?AWS_ACCESS_KEY_ID={KEY ID}&AWS_SECRET_ACCESS_KEY={SECRET ACCESS KEY}®ION={location}'; -~~~ +~~~ To list backup directories at a collection's URI, see [`SHOW BACKUP`](show-backup.html). 
@@ -80,7 +80,7 @@ To add a new KMS key to the most recent backup: ALTER BACKUP LATEST IN 'gs://{BUCKET NAME}?AUTH=specified&CREDENTIALS={ENCODED KEY}' ADD NEW_KMS = 'gs:///projects/{project name}/locations/{location}/keyRings/{key ring name}/cryptoKeys/{new key}?AUTH=specified&CREDENTIALS={encoded key}' WITH OLD_KMS = 'gs:///projects/{project name}/locations/{location}/keyRings/{key ring name}/cryptoKeys/{old key}?AUTH=specified&CREDENTIALS={encoded key}'; -~~~ +~~~ To add a new KMS key to a specific backup, issue an `ALTER BACKUP` statement that points to the full backup: @@ -89,7 +89,7 @@ To add a new KMS key to a specific backup, issue an `ALTER BACKUP` statement tha ALTER BACKUP '2022/03/23-213101.37' IN 'gs://{BUCKET NAME}?AUTH=specified&CREDENTIALS={ENCODED KEY}' ADD NEW_KMS = 'gs:///projects/{project name}/locations/{location}/keyRings/{key ring name}/cryptoKeys/{new key}?AUTH=specified&CREDENTIALS={encoded key}' WITH OLD_KMS = 'gs:///projects/{project name}/locations/{location}/keyRings/{key ring name}/cryptoKeys/{old key}?AUTH=specified&CREDENTIALS={encoded key}'; -~~~ +~~~ To list backup directories at a collection's URI, see [`SHOW BACKUP`](show-backup.html). diff --git a/src/current/v22.2/alter-changefeed.md b/src/current/v22.2/alter-changefeed.md index 9bb1893c232..b78bb7d04e4 100644 --- a/src/current/v22.2/alter-changefeed.md +++ b/src/current/v22.2/alter-changefeed.md @@ -21,7 +21,7 @@ It is necessary to [**pause**](pause-job.html) a changefeed before running the ` ## Synopsis
-{% remote_include https://raw.githubusercontent.com/cockroachdb/generated-diagrams/master/grammar_svg/alter_changefeed.html %} +{% remote_include https://raw.githubusercontent.com/cockroachdb/generated-diagrams/{{ page.release_info.crdb_branch_name }}/grammar_svg/alter_changefeed.html %}
## Parameters diff --git a/src/current/v22.2/show-create-schedule.md b/src/current/v22.2/show-create-schedule.md index c0ccb200154..cb506979616 100644 --- a/src/current/v22.2/show-create-schedule.md +++ b/src/current/v22.2/show-create-schedule.md @@ -14,7 +14,7 @@ Only members of the [`admin` role](security-reference/authorization.html#admin-r ## Synopsis
-{% remote_include https://raw.githubusercontent.com/cockroachdb/generated-diagrams/master/grammar_svg/show_create_schedules.html %} +{% remote_include https://raw.githubusercontent.com/cockroachdb/generated-diagrams/{{ page.release_info.crdb_branch_name }}/grammar_svg/show_create_schedules.html %}
## Parameters diff --git a/src/current/v23.1/alter-backup-schedule.md b/src/current/v23.1/alter-backup-schedule.md index 3aef798d0ab..a0d11752048 100644 --- a/src/current/v23.1/alter-backup-schedule.md +++ b/src/current/v23.1/alter-backup-schedule.md @@ -25,7 +25,7 @@ To alter a backup schedule, you must be the owner of the backup schedule, i.e., ## Synopsis
-{% remote_include https://raw.githubusercontent.com/cockroachdb/generated-diagrams/master/grammar_svg/alter_backup_schedule.html %} +{% remote_include https://raw.githubusercontent.com/cockroachdb/generated-diagrams/{{ page.release_info.crdb_branch_name }}/grammar_svg/alter_backup_schedule.html %}
## Parameters diff --git a/src/current/v23.1/alter-backup.md b/src/current/v23.1/alter-backup.md index 6cb5e463234..ef80e1a4b6f 100644 --- a/src/current/v23.1/alter-backup.md +++ b/src/current/v23.1/alter-backup.md @@ -16,7 +16,7 @@ CockroachDB supports AWS, Azure, and Google Cloud KMS keys. For more detail on e ## Synopsis
-{% remote_include https://raw.githubusercontent.com/cockroachdb/generated-diagrams/master/grammar_svg/alter_backup.html %} +{% remote_include https://raw.githubusercontent.com/cockroachdb/generated-diagrams/{{ page.release_info.crdb_branch_name }}/grammar_svg/alter_backup.html %}
## Parameters @@ -58,7 +58,7 @@ To add a new KMS key to the most recent backup: ALTER BACKUP LATEST IN 's3://{BUCKET NAME}?AWS_ACCESS_KEY_ID={KEY ID}&AWS_SECRET_ACCESS_KEY={SECRET ACCESS KEY}' ADD NEW_KMS = 'aws:///{new-key}?AWS_ACCESS_KEY_ID={KEY ID}&AWS_SECRET_ACCESS_KEY={SECRET ACCESS KEY}®ION={location}' WITH OLD_KMS = 'aws:///{old-key}?AWS_ACCESS_KEY_ID={KEY ID}&AWS_SECRET_ACCESS_KEY={SECRET ACCESS KEY}®ION={location}'; -~~~ +~~~ To add a new KMS key to a specific backup, issue an `ALTER BACKUP` statement that points to the full backup: @@ -67,13 +67,13 @@ To add a new KMS key to a specific backup, issue an `ALTER BACKUP` statement tha ALTER BACKUP '2022/03/23-213101.37' IN 's3://{BUCKET NAME}?AWS_ACCESS_KEY_ID={KEY ID}&AWS_SECRET_ACCESS_KEY={SECRET ACCESS KEY}' ADD NEW_KMS = 'aws:///{new-key}?AWS_ACCESS_KEY_ID={KEY ID}&AWS_SECRET_ACCESS_KEY={SECRET ACCESS KEY}®ION={location}' WITH OLD_KMS = 'aws:///{old-key}?AWS_ACCESS_KEY_ID={KEY ID}&AWS_SECRET_ACCESS_KEY={SECRET ACCESS KEY}®ION={location}'; -~~~ +~~~ To list backup directories at a collection's URI, see [`SHOW BACKUP`]({% link {{ page.version.version }}/show-backup.md %}). 
### Add an Azure KMS key to an encrypted backup -{% include_cached new-in.html version="v23.1" %} +{% include_cached new-in.html version="v23.1" %} To add a new KMS key to the most recent backup: @@ -82,7 +82,7 @@ To add a new KMS key to the most recent backup: ALTER BACKUP LATEST IN 'azure-blob://{container name}?AUTH=specified&AZURE_ACCOUNT_NAME={account name}&AZURE_CLIENT_ID={client ID}&AZURE_CLIENT_SECRET={client secret}&AZURE_TENANT_ID={tenant ID}' ADD NEW_KMS = 'azure-kms:///{new key}/{new key version}?AZURE_TENANT_ID={tenant ID}&AZURE_CLIENT_ID={client ID}&AZURE_CLIENT_SECRET={client secret}&AZURE_VAULT_NAME={key vault name}' WITH OLD_KMS = 'azure-kms:///{old key}/{old key version}?AZURE_TENANT_ID={tenant ID}&AZURE_CLIENT_ID={client ID}&AZURE_CLIENT_SECRET={client secret}&AZURE_VAULT_NAME={key vault name}'; -~~~ +~~~ To add a new KMS key to a specific backup, issue an `ALTER BACKUP` statement that points to the full backup: @@ -91,7 +91,7 @@ To add a new KMS key to a specific backup, issue an `ALTER BACKUP` statement tha ALTER BACKUP '2023/03/14-203808.29' IN 'azure-blob://{container name}?AUTH=specified&AZURE_ACCOUNT_NAME={account name}&AZURE_CLIENT_ID={client ID}&AZURE_CLIENT_SECRET={client secret}&AZURE_TENANT_ID={tenant ID}' ADD NEW_KMS = 'azure-kms:///{new key}/{new key version}?AZURE_TENANT_ID={tenant ID}&AZURE_CLIENT_ID={client ID}&AZURE_CLIENT_SECRET={client secret}&AZURE_VAULT_NAME={key vault name}' WITH OLD_KMS = 'azure-kms:///{old key}/{old key version}?AZURE_TENANT_ID={tenant ID}&AZURE_CLIENT_ID={client ID}&AZURE_CLIENT_SECRET={client secret}&AZURE_VAULT_NAME={key vault name}'; -~~~ +~~~ To list backup directories at a collection's URI, see [`SHOW BACKUP`]({% link {{ page.version.version }}/show-backup.md %}). 
@@ -104,7 +104,7 @@ To add a new KMS key to the most recent backup: ALTER BACKUP LATEST IN 'gs://{BUCKET NAME}?AUTH=specified&CREDENTIALS={ENCODED KEY}' ADD NEW_KMS = 'gs:///projects/{project name}/locations/{location}/keyRings/{key ring name}/cryptoKeys/{new key}?AUTH=specified&CREDENTIALS={encoded key}' WITH OLD_KMS = 'gs:///projects/{project name}/locations/{location}/keyRings/{key ring name}/cryptoKeys/{old key}?AUTH=specified&CREDENTIALS={encoded key}'; -~~~ +~~~ To add a new KMS key to a specific backup, issue an `ALTER BACKUP` statement that points to the full backup: @@ -113,7 +113,7 @@ To add a new KMS key to a specific backup, issue an `ALTER BACKUP` statement tha ALTER BACKUP '2022/03/23-213101.37' IN 'gs://{BUCKET NAME}?AUTH=specified&CREDENTIALS={ENCODED KEY}' ADD NEW_KMS = 'gs:///projects/{project name}/locations/{location}/keyRings/{key ring name}/cryptoKeys/{new key}?AUTH=specified&CREDENTIALS={encoded key}' WITH OLD_KMS = 'gs:///projects/{project name}/locations/{location}/keyRings/{key ring name}/cryptoKeys/{old key}?AUTH=specified&CREDENTIALS={encoded key}'; -~~~ +~~~ To list backup directories at a collection's URI, see [`SHOW BACKUP`]({% link {{ page.version.version }}/show-backup.md %}). diff --git a/src/current/v23.1/alter-changefeed.md b/src/current/v23.1/alter-changefeed.md index ca8e241013d..13bfa518121 100644 --- a/src/current/v23.1/alter-changefeed.md +++ b/src/current/v23.1/alter-changefeed.md @@ -21,7 +21,7 @@ It is necessary to [**pause**]({% link {{ page.version.version }}/pause-job.md % ## Synopsis
-{% remote_include https://raw.githubusercontent.com/cockroachdb/generated-diagrams/master/grammar_svg/alter_changefeed.html %} +{% remote_include https://raw.githubusercontent.com/cockroachdb/generated-diagrams/{{ page.release_info.crdb_branch_name }}/grammar_svg/alter_changefeed.html %}
## Parameters diff --git a/src/current/v23.1/show-create-schedule.md b/src/current/v23.1/show-create-schedule.md index 5d4ad87ae11..7a747968aac 100644 --- a/src/current/v23.1/show-create-schedule.md +++ b/src/current/v23.1/show-create-schedule.md @@ -14,7 +14,7 @@ Only members of the [`admin` role]({% link {{ page.version.version }}/security-r ## Synopsis
-{% remote_include https://raw.githubusercontent.com/cockroachdb/generated-diagrams/master/grammar_svg/show_create_schedules.html %} +{% remote_include https://raw.githubusercontent.com/cockroachdb/generated-diagrams/{{ page.release_info.crdb_branch_name }}/grammar_svg/show_create_schedules.html %}
## Parameters diff --git a/src/current/v23.2/alter-backup-schedule.md b/src/current/v23.2/alter-backup-schedule.md index 3aef798d0ab..a0d11752048 100644 --- a/src/current/v23.2/alter-backup-schedule.md +++ b/src/current/v23.2/alter-backup-schedule.md @@ -25,7 +25,7 @@ To alter a backup schedule, you must be the owner of the backup schedule, i.e., ## Synopsis
-{% remote_include https://raw.githubusercontent.com/cockroachdb/generated-diagrams/master/grammar_svg/alter_backup_schedule.html %} +{% remote_include https://raw.githubusercontent.com/cockroachdb/generated-diagrams/{{ page.release_info.crdb_branch_name }}/grammar_svg/alter_backup_schedule.html %}
## Parameters diff --git a/src/current/v23.2/alter-backup.md b/src/current/v23.2/alter-backup.md index 2effdb19274..ca7320280d1 100644 --- a/src/current/v23.2/alter-backup.md +++ b/src/current/v23.2/alter-backup.md @@ -16,7 +16,7 @@ CockroachDB supports AWS, Azure, and Google Cloud KMS keys. For more detail on e ## Synopsis
-{% remote_include https://raw.githubusercontent.com/cockroachdb/generated-diagrams/master/grammar_svg/alter_backup.html %} +{% remote_include https://raw.githubusercontent.com/cockroachdb/generated-diagrams/{{ page.release_info.crdb_branch_name }}/grammar_svg/alter_backup.html %}
## Parameters diff --git a/src/current/v23.2/alter-changefeed.md b/src/current/v23.2/alter-changefeed.md index ca8e241013d..13bfa518121 100644 --- a/src/current/v23.2/alter-changefeed.md +++ b/src/current/v23.2/alter-changefeed.md @@ -21,7 +21,7 @@ It is necessary to [**pause**]({% link {{ page.version.version }}/pause-job.md % ## Synopsis
-{% remote_include https://raw.githubusercontent.com/cockroachdb/generated-diagrams/master/grammar_svg/alter_changefeed.html %} +{% remote_include https://raw.githubusercontent.com/cockroachdb/generated-diagrams/{{ page.release_info.crdb_branch_name }}/grammar_svg/alter_changefeed.html %}
## Parameters diff --git a/src/current/v23.2/show-create-schedule.md b/src/current/v23.2/show-create-schedule.md index 5d4ad87ae11..7a747968aac 100644 --- a/src/current/v23.2/show-create-schedule.md +++ b/src/current/v23.2/show-create-schedule.md @@ -14,7 +14,7 @@ Only members of the [`admin` role]({% link {{ page.version.version }}/security-r ## Synopsis
-{% remote_include https://raw.githubusercontent.com/cockroachdb/generated-diagrams/master/grammar_svg/show_create_schedules.html %} +{% remote_include https://raw.githubusercontent.com/cockroachdb/generated-diagrams/{{ page.release_info.crdb_branch_name }}/grammar_svg/show_create_schedules.html %}
## Parameters diff --git a/src/current/v24.1/alter-backup-schedule.md b/src/current/v24.1/alter-backup-schedule.md index e0ce4716de9..c77e82990bf 100644 --- a/src/current/v24.1/alter-backup-schedule.md +++ b/src/current/v24.1/alter-backup-schedule.md @@ -25,7 +25,7 @@ To alter a backup schedule, you must be the owner of the backup schedule, i.e., ## Synopsis
-{% remote_include https://raw.githubusercontent.com/cockroachdb/generated-diagrams/master/grammar_svg/alter_backup_schedule.html %} +{% remote_include https://raw.githubusercontent.com/cockroachdb/generated-diagrams/{{ page.release_info.crdb_branch_name }}/grammar_svg/alter_backup_schedule.html %}
## Parameters diff --git a/src/current/v24.1/alter-backup.md b/src/current/v24.1/alter-backup.md index 2effdb19274..ca7320280d1 100644 --- a/src/current/v24.1/alter-backup.md +++ b/src/current/v24.1/alter-backup.md @@ -16,7 +16,7 @@ CockroachDB supports AWS, Azure, and Google Cloud KMS keys. For more detail on e ## Synopsis
-{% remote_include https://raw.githubusercontent.com/cockroachdb/generated-diagrams/master/grammar_svg/alter_backup.html %} +{% remote_include https://raw.githubusercontent.com/cockroachdb/generated-diagrams/{{ page.release_info.crdb_branch_name }}/grammar_svg/alter_backup.html %}
## Parameters diff --git a/src/current/v24.1/alter-changefeed.md b/src/current/v24.1/alter-changefeed.md index 2ba2b3e3b75..c796e0e525b 100644 --- a/src/current/v24.1/alter-changefeed.md +++ b/src/current/v24.1/alter-changefeed.md @@ -21,7 +21,7 @@ It is necessary to [**pause**]({% link {{ page.version.version }}/pause-job.md % ## Synopsis
-{% remote_include https://raw.githubusercontent.com/cockroachdb/generated-diagrams/master/grammar_svg/alter_changefeed.html %} +{% remote_include https://raw.githubusercontent.com/cockroachdb/generated-diagrams/{{ page.release_info.crdb_branch_name }}/grammar_svg/alter_changefeed.html %}
## Parameters diff --git a/src/current/v24.1/as-of-system-time.md b/src/current/v24.1/as-of-system-time.md index 34df11a7039..5a29792d96d 100644 --- a/src/current/v24.1/as-of-system-time.md +++ b/src/current/v24.1/as-of-system-time.md @@ -43,8 +43,10 @@ negative [`INTERVAL`]({% link {{ page.version.version }}/interval.md %}) | Added `with_min_timestamp(TIMESTAMPTZ, [nearest_only])` | The minimum [timestamp]({% link {{ page.version.version }}/timestamp.md %}) at which to perform the [bounded staleness read]({% link {{ page.version.version }}/follower-reads.md %}#bounded-staleness-reads). The actual timestamp of the read may be equal to or later than the provided timestamp, but cannot be before the provided timestamp. This is useful to request a read from nearby followers, if possible, while enforcing causality between an operation at some point in time and any dependent reads. This function accepts an optional `nearest_only` argument that will error if the reads cannot be serviced from a nearby replica. `with_max_staleness(INTERVAL, [nearest_only])` | The maximum staleness interval with which to perform the [bounded staleness read]({% link {{ page.version.version }}/follower-reads.md %}#bounded-staleness-reads). The timestamp of the read can be at most this stale with respect to the current time. This is useful to request a read from nearby followers, if possible, while placing some limit on how stale results can be. Note that `with_max_staleness(INTERVAL)` is equivalent to `with_min_timestamp(now() - INTERVAL)`. This function accepts an optional `nearest_only` argument that will error if the reads cannot be serviced from a nearby replica. -{{site.data.alerts.callout_success}} To set `AS OF SYSTEM TIME follower_read_timestamp()` on all implicit and explicit read-only transactions by default, set the `default_transaction_use_follower_reads` [session variable]({% link {{ page.version.version }}/set-vars.md %}) to `on`. 
When `default_transaction_use_follower_reads=on` and follower reads are enabled, all read-only transactions use follower reads. + +{{site.data.alerts.callout_info}} +Although the following format is supported, it is not intended to be used by most users: HLC timestamps can be specified using a [`DECIMAL`]({% link {{ page.version.version }}/decimal.md %}). The integer part is the wall time in nanoseconds. The fractional part is the logical counter, a 10-digit integer. This is the same format as produced by the `cluster_logical_timestamp()` function. {{site.data.alerts.end}} ## Examples @@ -266,18 +268,14 @@ SQLSTATE: 42P01 Once garbage collection has occurred, `AS OF SYSTEM TIME` will no longer be able to recover lost data. For more long-term recovery solutions, consider taking either a [full or incremental backup]({% link {{ page.version.version }}/take-full-and-incremental-backups.md %}) of your cluster. {{site.data.alerts.end}} +## Known limitations + +- {% include {{ page.version.version }}/known-limitations/aost-limitations.md %} +- {% include {{ page.version.version }}/known-limitations/create-statistics-aost-limitation.md %} + ## See also - [Select Historical Data]({% link {{ page.version.version }}/select-clause.md %}#select-historical-data-time-travel) - [Time-Travel Queries](https://www.cockroachlabs.com/blog/time-travel-queries-select-witty_subtitle-the_future/) - [Follower Reads]({% link {{ page.version.version }}/follower-reads.md %}) -- [Follower Reads Topology Pattern]({% link {{ page.version.version }}/topology-follower-reads.md %}) - -## Tech note - -Although the following format is supported, it is not intended to be used by most users. - -HLC timestamps can be specified using a [`DECIMAL`]({% link {{ page.version.version }}/decimal.md %}). The -integer part is the wall time in nanoseconds. The fractional part is -the logical counter, a 10-digit integer. This is the same format as -produced by the `cluster_logical_timestamp()` function. 
+- [Follower Reads Topology Pattern]({% link {{ page.version.version }}/topology-follower-reads.md %}) \ No newline at end of file diff --git a/src/current/v24.1/create-statistics.md b/src/current/v24.1/create-statistics.md index 4e32438c6ab..1dfa622c46b 100644 --- a/src/current/v24.1/create-statistics.md +++ b/src/current/v24.1/create-statistics.md @@ -213,6 +213,10 @@ To view statistics jobs, there are two options: (6 rows) ~~~ +## Known limitations + +{% include {{ page.version.version }}/known-limitations/create-statistics-aost-limitation.md %} + ## See also - [Cost-Based Optimizer]({% link {{ page.version.version }}/cost-based-optimizer.md %}) diff --git a/src/current/v24.1/known-limitations.md b/src/current/v24.1/known-limitations.md index afca8d0b57f..21389f7f212 100644 --- a/src/current/v24.1/known-limitations.md +++ b/src/current/v24.1/known-limitations.md @@ -45,9 +45,10 @@ This section describes limitations from previous CockroachDB versions that still CockroachDB supports the [PostgreSQL wire protocol](https://www.postgresql.org/docs/current/protocol.html) and the majority of its syntax. For a list of known differences in syntax and behavior between CockroachDB and PostgreSQL, see [Features that differ from PostgreSQL]({% link {{ page.version.version }}/postgresql-compatibility.md %}#features-that-differ-from-postgresql). -#### `AS OF SYSTEM TIME` does not support placeholders +#### `AS OF SYSTEM TIME` limitations -CockroachDB does not support placeholders in [`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}). The time value must be embedded in the SQL string. 
[#30955](https://github.com/cockroachdb/cockroach/issues/30955) +- {% include {{ page.version.version }}/known-limitations/aost-limitations.md %} +- {% include {{ page.version.version }}/known-limitations/create-statistics-aost-limitation.md %} #### `COPY` syntax not supported by CockroachDB diff --git a/src/current/v24.1/show-create-schedule.md b/src/current/v24.1/show-create-schedule.md index 5d4ad87ae11..7a747968aac 100644 --- a/src/current/v24.1/show-create-schedule.md +++ b/src/current/v24.1/show-create-schedule.md @@ -14,7 +14,7 @@ Only members of the [`admin` role]({% link {{ page.version.version }}/security-r ## Synopsis
-{% remote_include https://raw.githubusercontent.com/cockroachdb/generated-diagrams/master/grammar_svg/show_create_schedules.html %} +{% remote_include https://raw.githubusercontent.com/cockroachdb/generated-diagrams/{{ page.release_info.crdb_branch_name }}/grammar_svg/show_create_schedules.html %}
## Parameters diff --git a/src/current/v24.2/alter-backup-schedule.md b/src/current/v24.2/alter-backup-schedule.md index bd586d91dcf..ccebab197ea 100644 --- a/src/current/v24.2/alter-backup-schedule.md +++ b/src/current/v24.2/alter-backup-schedule.md @@ -25,7 +25,7 @@ To alter a backup schedule, you must be the owner of the backup schedule, i.e., ## Synopsis
-{% remote_include https://raw.githubusercontent.com/cockroachdb/generated-diagrams/master/grammar_svg/alter_backup_schedule.html %} +{% remote_include https://raw.githubusercontent.com/cockroachdb/generated-diagrams/{{ page.release_info.crdb_branch_name }}/grammar_svg/alter_backup_schedule.html %}
## Parameters diff --git a/src/current/v24.2/alter-backup.md b/src/current/v24.2/alter-backup.md index 2effdb19274..ca7320280d1 100644 --- a/src/current/v24.2/alter-backup.md +++ b/src/current/v24.2/alter-backup.md @@ -16,7 +16,7 @@ CockroachDB supports AWS, Azure, and Google Cloud KMS keys. For more detail on e ## Synopsis
-{% remote_include https://raw.githubusercontent.com/cockroachdb/generated-diagrams/master/grammar_svg/alter_backup.html %} +{% remote_include https://raw.githubusercontent.com/cockroachdb/generated-diagrams/{{ page.release_info.crdb_branch_name }}/grammar_svg/alter_backup.html %}
## Parameters diff --git a/src/current/v24.2/alter-changefeed.md b/src/current/v24.2/alter-changefeed.md index 2ba2b3e3b75..c796e0e525b 100644 --- a/src/current/v24.2/alter-changefeed.md +++ b/src/current/v24.2/alter-changefeed.md @@ -21,7 +21,7 @@ It is necessary to [**pause**]({% link {{ page.version.version }}/pause-job.md % ## Synopsis
-{% remote_include https://raw.githubusercontent.com/cockroachdb/generated-diagrams/master/grammar_svg/alter_changefeed.html %} +{% remote_include https://raw.githubusercontent.com/cockroachdb/generated-diagrams/{{ page.release_info.crdb_branch_name }}/grammar_svg/alter_changefeed.html %}
## Parameters diff --git a/src/current/v24.2/as-of-system-time.md b/src/current/v24.2/as-of-system-time.md index 34df11a7039..5a29792d96d 100644 --- a/src/current/v24.2/as-of-system-time.md +++ b/src/current/v24.2/as-of-system-time.md @@ -43,8 +43,10 @@ negative [`INTERVAL`]({% link {{ page.version.version }}/interval.md %}) | Added `with_min_timestamp(TIMESTAMPTZ, [nearest_only])` | The minimum [timestamp]({% link {{ page.version.version }}/timestamp.md %}) at which to perform the [bounded staleness read]({% link {{ page.version.version }}/follower-reads.md %}#bounded-staleness-reads). The actual timestamp of the read may be equal to or later than the provided timestamp, but cannot be before the provided timestamp. This is useful to request a read from nearby followers, if possible, while enforcing causality between an operation at some point in time and any dependent reads. This function accepts an optional `nearest_only` argument that will error if the reads cannot be serviced from a nearby replica. `with_max_staleness(INTERVAL, [nearest_only])` | The maximum staleness interval with which to perform the [bounded staleness read]({% link {{ page.version.version }}/follower-reads.md %}#bounded-staleness-reads). The timestamp of the read can be at most this stale with respect to the current time. This is useful to request a read from nearby followers, if possible, while placing some limit on how stale results can be. Note that `with_max_staleness(INTERVAL)` is equivalent to `with_min_timestamp(now() - INTERVAL)`. This function accepts an optional `nearest_only` argument that will error if the reads cannot be serviced from a nearby replica. -{{site.data.alerts.callout_success}} To set `AS OF SYSTEM TIME follower_read_timestamp()` on all implicit and explicit read-only transactions by default, set the `default_transaction_use_follower_reads` [session variable]({% link {{ page.version.version }}/set-vars.md %}) to `on`. 
When `default_transaction_use_follower_reads=on` and follower reads are enabled, all read-only transactions use follower reads. + +{{site.data.alerts.callout_info}} +Although the following format is supported, it is not intended to be used by most users: HLC timestamps can be specified using a [`DECIMAL`]({% link {{ page.version.version }}/decimal.md %}). The integer part is the wall time in nanoseconds. The fractional part is the logical counter, a 10-digit integer. This is the same format as produced by the `cluster_logical_timestamp()` function. {{site.data.alerts.end}} ## Examples @@ -266,18 +268,14 @@ SQLSTATE: 42P01 Once garbage collection has occurred, `AS OF SYSTEM TIME` will no longer be able to recover lost data. For more long-term recovery solutions, consider taking either a [full or incremental backup]({% link {{ page.version.version }}/take-full-and-incremental-backups.md %}) of your cluster. {{site.data.alerts.end}} +## Known limitations + +- {% include {{ page.version.version }}/known-limitations/aost-limitations.md %} +- {% include {{ page.version.version }}/known-limitations/create-statistics-aost-limitation.md %} + ## See also - [Select Historical Data]({% link {{ page.version.version }}/select-clause.md %}#select-historical-data-time-travel) - [Time-Travel Queries](https://www.cockroachlabs.com/blog/time-travel-queries-select-witty_subtitle-the_future/) - [Follower Reads]({% link {{ page.version.version }}/follower-reads.md %}) -- [Follower Reads Topology Pattern]({% link {{ page.version.version }}/topology-follower-reads.md %}) - -## Tech note - -Although the following format is supported, it is not intended to be used by most users. - -HLC timestamps can be specified using a [`DECIMAL`]({% link {{ page.version.version }}/decimal.md %}). The -integer part is the wall time in nanoseconds. The fractional part is -the logical counter, a 10-digit integer. This is the same format as -produced by the `cluster_logical_timestamp()` function. 
+- [Follower Reads Topology Pattern]({% link {{ page.version.version }}/topology-follower-reads.md %}) \ No newline at end of file diff --git a/src/current/v24.2/changefeed-sinks.md b/src/current/v24.2/changefeed-sinks.md index 1a28432b699..b45fe753d6b 100644 --- a/src/current/v24.2/changefeed-sinks.md +++ b/src/current/v24.2/changefeed-sinks.md @@ -212,6 +212,62 @@ See the [Changefeed Examples]({% link {{ page.version.version }}/changefeed-exam {% include {{ page.version.version }}/cdc/note-changefeed-message-page.md %} +## Amazon MSK + +{{site.data.alerts.callout_info}} +On CockroachDB {{ site.data.products.core }} clusters, you must create instances in the same VPC as the MSK or MSK Serverless cluster in order for the changefeed to authenticate successfully. + +If you would like to connect a CockroachDB {{ site.data.products.dedicated }} cluster to an Amazon MSK cluster, contact your Cockroach Labs account team. +{{site.data.alerts.end}} + +Changefeeds can deliver messages to Amazon MSK clusters ([Amazon Managed Streaming for Apache Kafka](https://docs.aws.amazon.com/msk/latest/developerguide/what-is-msk.html)). Amazon MSK cluster types include: [MSK](https://docs.aws.amazon.com/msk/latest/developerguide/create-cluster.html) and [MSK Serverless](https://docs.aws.amazon.com/msk/latest/developerguide/serverless.html). Changefeeds support the following authentication methods for these MSK cluster types: + +- MSK: `SCRAM` or `IAM` +- MSK Serverless: `IAM` + +{% include_cached new-in.html version="v24.2" %} Changefeeds can deliver messages to MSK and MSK Serverless clusters using AWS IAM roles. 
+ +{% comment %}will change out the links here to CRDB tutorials in a follow-up PR.{% endcomment %} + +For initial setup guides, refer to the AWS documentation: + +- [MSK clusters](https://docs.aws.amazon.com/msk/latest/developerguide/getting-started.html) +- [MSK Serverless clusters](https://docs.aws.amazon.com/msk/latest/developerguide/serverless-getting-started.html) + +Changefeeds connecting to Amazon MSK clusters use the `kafka://` scheme. The example URIs show the necessary parameters for MSK and MSK Serverless clusters depending on the authentication type: + +- To connect to an MSK cluster using `SCRAM` authentication, you must include the following parameters in the URI: + + ~~~ + kafka://{cluster_endpoint}/?tls_enabled=true&sasl_enabled=true&sasl_mechanism=SCRAM-SHA-512&sasl_user={user}&sasl_password={password} + ~~~ + + For SCRAM authentication, add your SASL username and password to the URI. + +- To connect to an MSK or MSK Serverless cluster using AWS IAM roles, you must include the following parameters in the URI: + + ~~~ + kafka://{cluster_endpoint}/?tls_enabled=true&sasl_enabled=true&sasl_mechanism=AWS_MSK_IAM&sasl_aws_region={region}&sasl_aws_iam_role_arn={arn}&sasl_aws_iam_session_name={your_session_name} + ~~~ + + For IAM authentication, add the MSK cluster region, IAM role ARN, and session name to the URI. + +This table outlines the available parameters for Amazon MSK URIs: + +URI Parameter | Description +---------------+------------------------------------------------------------------ +`cluster_endpoint` | The endpoint listed for your Amazon MSK cluster in the AWS Console. For example, `boot-a1test.c3.kafka-serverless.us-east-2.amazonaws.com:9098`. +`sasl_aws_iam_role_arn` | The ARN for the IAM role that has the permissions to create a topic and send data to the topic. +`sasl_aws_iam_session_name` | The user-specified string that identifies the session in AWS. +`sasl_aws_region` | The region of the Amazon MSK cluster. 
+`sasl_enabled` | Enable SASL authentication. Set this to `true`. +`sasl_mechanism` | Set to `AWS_MSK_IAM`, `SCRAM-SHA-512`, or `SCRAM-SHA-256`. +`sasl_password` | Your SASL password. +`sasl_user` | Your SASL username. +`tls_enabled` | Enable Transport Layer Security (TLS) on the connection to Amazon MSK clusters. Set this to `true`. + +For more detail on each of these parameters, refer to [Query Parameters]({% link {{ page.version.version }}/create-changefeed.md %}#query-parameters). + ## Confluent Cloud Changefeeds can deliver messages to Kafka clusters hosted on [Confluent Cloud](https://www.confluent.io/confluent-cloud/tryfree/). diff --git a/src/current/v24.2/create-changefeed.md b/src/current/v24.2/create-changefeed.md index 8bda3fd3f04..29513fd0db3 100644 --- a/src/current/v24.2/create-changefeed.md +++ b/src/current/v24.2/create-changefeed.md @@ -85,7 +85,7 @@ Query parameters include: Parameter |
Sink Type
|
Type
| Description -------------------+-----------------------------------------------+-------------------------------------+------------------------------------------------------------ `ASSUME_ROLE` | [Amazon S3]({% link {{ page.version.version }}/changefeed-sinks.md %}), [Google Cloud Storage]({% link {{ page.version.version }}/changefeed-sinks.md %}#cloud-storage-sink), [Google Cloud Pub/Sub]({% link {{ page.version.version }}/changefeed-sinks.md %}#google-cloud-pub-sub) | [`STRING`]({% link {{ page.version.version }}/string.md %}) | {% include {{ page.version.version }}/misc/assume-role-description.md %} -`AUTH` | [Amazon S3]({% link {{ page.version.version }}/changefeed-sinks.md %}), [Google Cloud Storage]({% link {{ page.version.version }}/changefeed-sinks.md %}#cloud-storage-sink), [Google Cloud Pub/Sub]({% link {{ page.version.version }}/changefeed-sinks.md %}#google-cloud-pub-sub), [Azure Blob Storage]({% link {{ page.version.version }}/changefeed-sinks.md %}#cloud-storage-sink) | The authentication parameter can define either `specified` (default) or `implicit` authentication. To use `specified` authentication, pass your [Service Account](https://cloud.google.com/iam/docs/understanding-service-accounts) credentials with the URI. To use `implicit` authentication, configure these credentials via an environment variable. Refer to the [Cloud Storage Authentication page]({% link {{ page.version.version }}/cloud-storage-authentication.md %}) page for examples of each of these. 
+`AUTH` | [Amazon S3]({% link {{ page.version.version }}/changefeed-sinks.md %}), [Google Cloud Storage]({% link {{ page.version.version }}/changefeed-sinks.md %}#cloud-storage-sink), [Google Cloud Pub/Sub]({% link {{ page.version.version }}/changefeed-sinks.md %}#google-cloud-pub-sub), [Azure Blob Storage]({% link {{ page.version.version }}/changefeed-sinks.md %}#cloud-storage-sink) | [`STRING`]({% link {{ page.version.version }}/string.md %}) | The authentication parameter can define either `specified` (default) or `implicit` authentication. To use `specified` authentication, pass your [Service Account](https://cloud.google.com/iam/docs/understanding-service-accounts) credentials with the URI. To use `implicit` authentication, configure these credentials via an environment variable. Refer to the [Cloud Storage Authentication page]({% link {{ page.version.version }}/cloud-storage-authentication.md %}) page for examples of each of these. `api_key` | [Confluent Cloud]({% link {{ page.version.version }}/changefeed-sinks.md %}#confluent-cloud) | [`STRING`]({% link {{ page.version.version }}/string.md %}) | The API key created for the cluster in Confluent Cloud. `api_secret` | [Confluent Cloud]({% link {{ page.version.version }}/changefeed-sinks.md %}#confluent-cloud) | [`STRING`]({% link {{ page.version.version }}/string.md %}) | The API key's secret generated in Confluent Cloud. **Note:** This must be [URL-encoded](https://www.urlencoder.org/) before passing into the connection string. `ca_cert` | [Kafka]({% link {{ page.version.version }}/changefeed-sinks.md %}#kafka), [webhook]({% link {{ page.version.version }}/changefeed-sinks.md %}#webhook-sink), [Confluent schema registry](https://docs.confluent.io/platform/current/schema-registry/index.html) | [`STRING`]({% link {{ page.version.version }}/string.md %}) | The base64-encoded `ca_cert` file. Specify `ca_cert` for a Kafka sink, webhook sink, and/or a Confluent schema registry.

For usage with a Kafka sink, see [Kafka Sink URI]({% link {{ page.version.version }}/changefeed-sinks.md %}#kafka).

It's necessary to state `https` in the schema registry's address when passing `ca_cert`:
`confluent_schema_registry='https://schema_registry:8081?ca_cert=LS0tLS1CRUdJTiBDRVJUSUZ'`
See [`confluent_schema_registry`](#confluent-schema-registry) for more detail on using this option.

Note: To encode your `ca.cert`, run `base64 -w 0 ca.cert`. @@ -95,19 +95,22 @@ Parameter |
Sink Type
|
`insecure_tls_skip_verify` | [Kafka]({% link {{ page.version.version }}/changefeed-sinks.md %}#kafka), [webhook]({% link {{ page.version.version }}/changefeed-sinks.md %}#webhook-sink) | [`BOOL`]({% link {{ page.version.version }}/bool.md %}) | If `true`, disable client-side validation of responses. Note that a CA certificate is still required; this parameter means that the client will not verify the certificate. **Warning:** Use this query parameter with caution, as it creates [MITM](https://wikipedia.org/wiki/Man-in-the-middle_attack) vulnerabilities unless combined with another method of authentication.

**Default:** `false` `partition_format` | [cloud]({% link {{ page.version.version }}/changefeed-sinks.md %}#cloud-storage-sink) | [`STRING`]({% link {{ page.version.version }}/string.md %}) | Specify how changefeed [file paths](#general-file-format) are partitioned in cloud storage sinks. Use `partition_format` with the following values:

  • `daily` is the default behavior that organizes directories by dates (`2022-05-18/`, `2022-05-19/`, etc.).
  • `hourly` will further organize directories by hour within each date directory (`2022-05-18/06`, `2022-05-18/07`, etc.).
  • `flat` will not partition the files at all.

For example: `CREATE CHANGEFEED FOR TABLE users INTO 'gs://...?AUTH...&partition_format=hourly'`

**Default:** `daily` `S3_STORAGE_CLASS` | [Amazon S3 cloud storage sink]({% link {{ page.version.version }}/changefeed-sinks.md %}#amazon-s3) | [`STRING`]({% link {{ page.version.version }}/string.md %}) | Specify the Amazon S3 storage class for files created by the changefeed. See [Create a changefeed with an S3 storage class](#create-a-changefeed-with-an-s3-storage-class) for the available classes and an example.

**Default:** `STANDARD` +New in v24.2:`sasl_aws_iam_role_arn` | [Amazon MSK]({% link {{ page.version.version }}/changefeed-sinks.md %}#amazon-msk) | [`STRING`]({% link {{ page.version.version }}/string.md %}) | The ARN for the IAM role that has the permissions to create a topic and send data to the topic. For more details on setting up an Amazon MSK cluster with an IAM role, refer to [the AWS documentation](https://docs.aws.amazon.com/msk/latest/developerguide/serverless-getting-started.html). +New in v24.2:`sasl_aws_iam_session_name` | [Amazon MSK]({% link {{ page.version.version }}/changefeed-sinks.md %}#amazon-msk) | [`STRING`]({% link {{ page.version.version }}/string.md %}) | The user-specified string that identifies the session in AWS. +New in v24.2:`sasl_aws_region` | [Amazon MSK]({% link {{ page.version.version }}/changefeed-sinks.md %}#amazon-msk) | [`STRING`]({% link {{ page.version.version }}/string.md %}) | The region of the Amazon MSK cluster. `sasl_client_id` | [Kafka]({% link {{ page.version.version }}/changefeed-sinks.md %}#kafka) | [`STRING`]({% link {{ page.version.version }}/string.md %}) | Client ID for OAuth authentication from a third-party provider. This parameter is only applicable with `sasl_mechanism=OAUTHBEARER`. `sasl_client_secret` | [Kafka]({% link {{ page.version.version }}/changefeed-sinks.md %}#kafka) | [`STRING`]({% link {{ page.version.version }}/string.md %}) | Client secret for OAuth authentication from a third-party provider. This parameter is only applicable with `sasl_mechanism=OAUTHBEARER`. **Note:** You must [base64 encode](https://www.base64encode.org/) this value when passing it in as part of a sink URI. 
-`sasl_enabled` | [Azure Event Hubs]({% link {{ page.version.version }}/changefeed-sinks.md %}#azure-event-hubs), [Kafka]({% link {{ page.version.version }}/changefeed-sinks.md %}#kafka), [Confluent Cloud]({% link {{ page.version.version }}/changefeed-sinks.md %}#confluent-cloud) | [`BOOL`]({% link {{ page.version.version }}/bool.md %}) | If `true`, the authentication protocol can be set to `SCRAM` or `PLAIN` using the `sasl_mechanism` parameter. You must have `tls_enabled` set to `true` to use SASL.

For Confluent Cloud and Azure Event Hubs sinks, this is set to `true` by default.

**Default:** `false` +`sasl_enabled` | [Amazon MSK]({% link {{ page.version.version }}/changefeed-sinks.md %}#amazon-msk), [Azure Event Hubs]({% link {{ page.version.version }}/changefeed-sinks.md %}#azure-event-hubs), [Kafka]({% link {{ page.version.version }}/changefeed-sinks.md %}#kafka), [Confluent Cloud]({% link {{ page.version.version }}/changefeed-sinks.md %}#confluent-cloud) | [`BOOL`]({% link {{ page.version.version }}/bool.md %}) | If `true`, set the authentication protocol with the [`sasl_mechanism`](#sasl-mechanism) parameter. You must have `tls_enabled` set to `true` to use SASL.

For Confluent Cloud and Azure Event Hubs sinks, this is set to `true` by default.

**Default:** `false` `sasl_grant_type` | [Kafka]({% link {{ page.version.version }}/changefeed-sinks.md %}#kafka) | [`STRING`]({% link {{ page.version.version }}/string.md %}) | Override the default OAuth client credentials grant type for other implementations. This parameter is only applicable with `sasl_mechanism=OAUTHBEARER`. `sasl_handshake` | [Azure Event Hubs]({% link {{ page.version.version }}/changefeed-sinks.md %}#azure-event-hubs), [Kafka]({% link {{ page.version.version }}/changefeed-sinks.md %}#kafka), [Confluent Cloud]({% link {{ page.version.version }}/changefeed-sinks.md %}#confluent-cloud) | [`BOOL`]({% link {{ page.version.version }}/bool.md %}) | For Confluent Cloud and Azure Event Hubs sinks, this is set to `true` by default. -`sasl_mechanism` | [Azure Event Hubs]({% link {{ page.version.version }}/changefeed-sinks.md %}#azure-event-hubs), [Kafka]({% link {{ page.version.version }}/changefeed-sinks.md %}#kafka), [Confluent Cloud]({% link {{ page.version.version }}/changefeed-sinks.md %}#confluent-cloud) | [`STRING`]({% link {{ page.version.version }}/string.md %}) | Can be set to [`OAUTHBEARER`](https://docs.confluent.io/platform/current/kafka/authentication_sasl/authentication_sasl_oauth.html), [`SCRAM-SHA-256`](https://docs.confluent.io/platform/current/kafka/authentication_sasl/authentication_sasl_scram.html), [`SCRAM-SHA-512`](https://docs.confluent.io/platform/current/kafka/authentication_sasl/authentication_sasl_scram.html), or [`PLAIN`](https://docs.confluent.io/current/kafka/authentication_sasl/authentication_sasl_plain.html). A `sasl_user` and `sasl_password` are required.

See the [Connect to a Changefeed Kafka sink with OAuth Using Okta](connect-to-a-changefeed-kafka-sink-with-oauth-using-okta.html) tutorial for detail setting up OAuth using Okta.

For Confluent Cloud and Azure Event Hubs sinks, this is set to `PLAIN` by default.

**Default:** `PLAIN` +`sasl_mechanism` | [Amazon MSK]({% link {{ page.version.version }}/changefeed-sinks.md %}#amazon-msk), [Azure Event Hubs]({% link {{ page.version.version }}/changefeed-sinks.md %}#azure-event-hubs), [Kafka]({% link {{ page.version.version }}/changefeed-sinks.md %}#kafka), [Confluent Cloud]({% link {{ page.version.version }}/changefeed-sinks.md %}#confluent-cloud) | [`STRING`]({% link {{ page.version.version }}/string.md %}) | Can be set to [`OAUTHBEARER`](https://docs.confluent.io/platform/current/kafka/authentication_sasl/authentication_sasl_oauth.html), [`SCRAM-SHA-256`](https://docs.confluent.io/platform/current/kafka/authentication_sasl/authentication_sasl_scram.html), [`SCRAM-SHA-512`](https://docs.confluent.io/platform/current/kafka/authentication_sasl/authentication_sasl_scram.html), or [`PLAIN`](https://docs.confluent.io/current/kafka/authentication_sasl/authentication_sasl_plain.html). A `sasl_user` and `sasl_password` are required for `PLAIN` and `SCRAM` authentication.

For Amazon MSK clusters, set to [`AWS_MSK_IAM`]({% link {{ page.version.version }}/changefeed-sinks.md %}#amazon-msk). [`sasl_aws_iam_role_arn`](#sasl-aws-iam-role-arn), [`sasl_aws_iam_session_name`](#sasl-aws-iam-session-name), and [`sasl_aws_region`](#sasl-aws-region) are also required in the sink URI.

Refer to the [Connect to a Changefeed Kafka sink with OAuth Using Okta](connect-to-a-changefeed-kafka-sink-with-oauth-using-okta.html) tutorial for details on setting up OAuth using Okta.

For Confluent Cloud and Azure Event Hubs sinks, `sasl_mechanism=PLAIN` is required but set automatically by CockroachDB.

**Default:** `PLAIN` `sasl_scopes` | [Kafka]({% link {{ page.version.version }}/changefeed-sinks.md %}#kafka) | [`STRING`]({% link {{ page.version.version }}/string.md %}) | A list of scopes that the OAuth token should have access for. This parameter is only applicable with `sasl_mechanism=OAUTHBEARER`. `sasl_token_url` | [Kafka]({% link {{ page.version.version }}/changefeed-sinks.md %}#kafka) | [`STRING`]({% link {{ page.version.version }}/string.md %}) | Client token URL for OAuth authentication from a third-party provider. **Note:** You must [URL encode](https://www.urlencoder.org/) this value before passing in a URI. This parameter is only applicable with `sasl_mechanism=OAUTHBEARER`. -`sasl_user` | [Kafka]({% link {{ page.version.version }}/changefeed-sinks.md %}#kafka) | [`STRING`]({% link {{ page.version.version }}/string.md %}) | Your SASL username. -`sasl_password` | [Kafka]({% link {{ page.version.version }}/changefeed-sinks.md %}#kafka) | [`STRING`]({% link {{ page.version.version }}/string.md %}) | Your SASL password. **Note:** Passwords should be [URL encoded](https://wikipedia.org/wiki/Percent-encoding) since the value can contain characters that would cause authentication to fail. +`sasl_user` | [Amazon MSK]({% link {{ page.version.version }}/changefeed-sinks.md %}#amazon-msk), [Kafka]({% link {{ page.version.version }}/changefeed-sinks.md %}#kafka) | [`STRING`]({% link {{ page.version.version }}/string.md %}) | Your SASL username. +`sasl_password` | [Amazon MSK]({% link {{ page.version.version }}/changefeed-sinks.md %}#amazon-msk), [Kafka]({% link {{ page.version.version }}/changefeed-sinks.md %}#kafka) | [`STRING`]({% link {{ page.version.version }}/string.md %}) | Your SASL password. **Note:** Passwords should be [URL encoded](https://wikipedia.org/wiki/Percent-encoding) since the value can contain characters that would cause authentication to fail. 
`shared_access_key` | [Azure Event Hubs]({% link {{ page.version.version }}/changefeed-sinks.md %}#azure-event-hubs) | [`STRING`]({% link {{ page.version.version }}/string.md %}) | The URL-encoded key for your Event Hub shared access policy. -<`shared_access_key_name` | [Azure Event Hubs]({% link {{ page.version.version }}/changefeed-sinks.md %}#azure-event-hubs) | [`STRING`]({% link {{ page.version.version }}/string.md %}) | The name of your Event Hub shared access policy. -`tls_enabled` | [Kafka]({% link {{ page.version.version }}/changefeed-sinks.md %}#kafka), [Confluent Cloud]({% link {{ page.version.version }}/changefeed-sinks.md %}#confluent-cloud) | [`BOOL`]({% link {{ page.version.version }}/bool.md %}) | If `true`, enable Transport Layer Security (TLS) on the connection to Kafka. This can be used with a `ca_cert` (see below).

For Confluent Cloud and Azure Event Hubs sinks, this is set to `true` by default.

**Default:** `false` +`shared_access_key_name` | [Azure Event Hubs]({% link {{ page.version.version }}/changefeed-sinks.md %}#azure-event-hubs) | [`STRING`]({% link {{ page.version.version }}/string.md %}) | The name of your Event Hub shared access policy. +`tls_enabled` | [Amazon MSK]({% link {{ page.version.version }}/changefeed-sinks.md %}#amazon-msk), [Kafka]({% link {{ page.version.version }}/changefeed-sinks.md %}#kafka), [Confluent Cloud]({% link {{ page.version.version }}/changefeed-sinks.md %}#confluent-cloud) | [`BOOL`]({% link {{ page.version.version }}/bool.md %}) | If `true`, enable Transport Layer Security (TLS) on the connection to Kafka. This can be used with a `ca_cert` (see below).

For Confluent Cloud and Azure Event Hubs sinks, this is set to `true` by default.

**Default:** `false` `topic_name` | [Azure Event Hubs]({% link {{ page.version.version }}/changefeed-sinks.md %}#azure-event-hubs), [Kafka]({% link {{ page.version.version }}/changefeed-sinks.md %}#kafka), [Confluent Cloud]({% link {{ page.version.version }}/changefeed-sinks.md %}#confluent-cloud), [GC Pub/Sub]({% link {{ page.version.version }}/changefeed-sinks.md %}#google-cloud-pub-sub) | [`STRING`]({% link {{ page.version.version }}/string.md %}) | Allows arbitrary topic naming for Kafka and GC Pub/Sub topics. See the [Kafka topic naming limitations]({% link {{ page.version.version }}/changefeed-sinks.md %}#topic-naming) or [GC Pub/Sub topic naming]({% link {{ page.version.version }}/changefeed-sinks.md %}#pub-sub-topic-naming) for detail on supported characters etc.

For example, `CREATE CHANGEFEED FOR foo,bar INTO 'kafka://sink?topic_name=all'` will emit all records to a topic named `all`. Note that schemas will still be registered separately. When using Kafka, this parameter can be combined with the [`topic_prefix` parameter](#topic-prefix) (this is not supported for GC Pub/Sub).

**Default:** table name. `topic_prefix` | [Azure Event Hubs]({% link {{ page.version.version }}/changefeed-sinks.md %}#azure-event-hubs), [Kafka]({% link {{ page.version.version }}/changefeed-sinks.md %}#kafka), [Confluent Cloud]({% link {{ page.version.version }}/changefeed-sinks.md %}#confluent-cloud) | [`STRING`]({% link {{ page.version.version }}/string.md %}) | Adds a prefix to all topic names.

For example, `CREATE CHANGEFEED FOR TABLE foo INTO 'kafka://...?topic_prefix=bar_'` would emit rows under the topic `bar_foo` instead of `foo`. diff --git a/src/current/v24.2/create-statistics.md b/src/current/v24.2/create-statistics.md index 4e32438c6ab..1dfa622c46b 100644 --- a/src/current/v24.2/create-statistics.md +++ b/src/current/v24.2/create-statistics.md @@ -213,6 +213,10 @@ To view statistics jobs, there are two options: (6 rows) ~~~ +## Known limitations + +{% include {{ page.version.version }}/known-limitations/create-statistics-aost-limitation.md %} + ## See also - [Cost-Based Optimizer]({% link {{ page.version.version }}/cost-based-optimizer.md %}) diff --git a/src/current/v24.2/known-limitations.md b/src/current/v24.2/known-limitations.md index 82c36c94bc5..34e516e4eb0 100644 --- a/src/current/v24.2/known-limitations.md +++ b/src/current/v24.2/known-limitations.md @@ -26,9 +26,10 @@ This section describes limitations from previous CockroachDB versions that still CockroachDB supports the [PostgreSQL wire protocol](https://www.postgresql.org/docs/current/protocol.html) and the majority of its syntax. For a list of known differences in syntax and behavior between CockroachDB and PostgreSQL, see [Features that differ from PostgreSQL]({% link {{ page.version.version }}/postgresql-compatibility.md %}#features-that-differ-from-postgresql). -#### `AS OF SYSTEM TIME` does not support placeholders +#### `AS OF SYSTEM TIME` limitations -CockroachDB does not support placeholders in [`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}). The time value must be embedded in the SQL string. 
[#30955](https://github.com/cockroachdb/cockroach/issues/30955) +- {% include {{ page.version.version }}/known-limitations/aost-limitations.md %} +- {% include {{ page.version.version }}/known-limitations/create-statistics-aost-limitation.md %} #### `COPY` syntax not supported by CockroachDB diff --git a/src/current/v24.2/show-create-schedule.md b/src/current/v24.2/show-create-schedule.md index 5d4ad87ae11..7a747968aac 100644 --- a/src/current/v24.2/show-create-schedule.md +++ b/src/current/v24.2/show-create-schedule.md @@ -14,7 +14,7 @@ Only members of the [`admin` role]({% link {{ page.version.version }}/security-r ## Synopsis
-{% remote_include https://raw.githubusercontent.com/cockroachdb/generated-diagrams/master/grammar_svg/show_create_schedules.html %} +{% remote_include https://raw.githubusercontent.com/cockroachdb/generated-diagrams/{{ page.release_info.crdb_branch_name }}/grammar_svg/show_create_schedules.html %}
## Parameters