diff --git a/.chloggen/get-xml-nonelements.yaml b/.chloggen/get-xml-nonelements.yaml
new file mode 100644
index 000000000000..aff6e9c9aab3
--- /dev/null
+++ b/.chloggen/get-xml-nonelements.yaml
@@ -0,0 +1,27 @@
+# Use this changelog template to create an entry for release notes.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: 'enhancement'
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: pkg/ottl
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: GetXML Converter now supports selecting text, CDATA, and attribute values.
+
+# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
+issues: [36821]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext:
+
+# If your change doesn't affect end users or the exported elements of any package,
+# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]'
+change_logs: []
diff --git a/.chloggen/mongodbatlasreceiver-metric-adds.yaml b/.chloggen/mongodbatlasreceiver-metric-adds.yaml
new file mode 100644
index 000000000000..7c4dabadd869
--- /dev/null
+++ b/.chloggen/mongodbatlasreceiver-metric-adds.yaml
@@ -0,0 +1,31 @@
+# Use this changelog template to create an entry for release notes.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: enhancement
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: mongodbatlasreceiver
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: "Adds additional metrics to the MongoDB Atlas receiver"
+
+# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
+issues: [36525]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext: |
+ Adds several new default-disabled metrics to the MongoDB Atlas receiver. These metrics are:
+ - mongodbatlas.disk.partition.queue.depth
+ - mongodbatlas.disk.partition.throughput
+ - mongodbatlas.process.cache.ratio
+
+# If your change doesn't affect end users or the exported elements of any package,
+# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]'
+change_logs: []
diff --git a/.chloggen/postgresql-17-bgwriter.yaml b/.chloggen/postgresql-17-bgwriter.yaml
new file mode 100644
index 000000000000..d4e69abd8a50
--- /dev/null
+++ b/.chloggen/postgresql-17-bgwriter.yaml
@@ -0,0 +1,27 @@
+# Use this changelog template to create an entry for release notes.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: bug_fix
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: postgresqlreceiver
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: "Update the postgresqlreceiver to handle new table schema for the bgwriter metrics in pg17+"
+
+# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
+issues: [36784]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext:
+
+# If your change doesn't affect end users or the exported elements of any package,
+# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]'
+change_logs: [user]
diff --git a/pkg/ottl/ottlfuncs/README.md b/pkg/ottl/ottlfuncs/README.md
index 890ceef7d6fe..8a4aec76ca5b 100644
--- a/pkg/ottl/ottlfuncs/README.md
+++ b/pkg/ottl/ottlfuncs/README.md
@@ -835,6 +835,18 @@ Get all elements in the document with tag "a" that have an attribute "b" with va
- `GetXML(body, "//a[@b='c']")`
+Get `foo` from `<a>foo</a>`
+
+- `GetXML(body, "/a/text()")`
+
+Get `hello` from `<a><![CDATA[hello]]></a>`
+
+- `GetXML(body, "/a/text()")`
+
+Get `bar` from `<a foo="bar"></a>`
+
+- `GetXML(body, "/a/@foo")`
+
### Hex
`Hex(value)`
diff --git a/pkg/ottl/ottlfuncs/func_get_xml.go b/pkg/ottl/ottlfuncs/func_get_xml.go
index d5390b62da63..c344dd5b8efe 100644
--- a/pkg/ottl/ottlfuncs/func_get_xml.go
+++ b/pkg/ottl/ottlfuncs/func_get_xml.go
@@ -52,10 +52,18 @@ func getXML[K any](target ottl.StringGetter[K], xPath string) ottl.ExprFunc[K] {
result := &xmlquery.Node{Type: xmlquery.DocumentNode}
for _, n := range nodes {
- if n.Type != xmlquery.ElementNode {
+ switch n.Type {
+ case xmlquery.ElementNode, xmlquery.TextNode:
+ xmlquery.AddChild(result, n)
+ case xmlquery.AttributeNode, xmlquery.CharDataNode:
+			// Attribute and CharData nodes carry their content as a value; wrap it in a text node so it serializes as plain text.
+ xmlquery.AddChild(result, &xmlquery.Node{
+ Type: xmlquery.TextNode,
+ Data: n.InnerText(),
+ })
+ default:
continue
}
- xmlquery.AddChild(result, n)
}
return result.OutputXML(false), nil
}
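Context for the switch above: xmlquery returns attribute and CDATA selections as `AttributeNode`/`CharDataNode` values whose content is only reachable through `InnerText()`, so they must be rewrapped as text nodes before being attached to the result document. A minimal standalone sketch of those node types (assuming only `github.com/antchfx/xmlquery`, which this file already imports):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/antchfx/xmlquery"
)

func main() {
	doc, err := xmlquery.Parse(strings.NewReader(`<a foo="bar"><![CDATA[hello]]></a>`))
	if err != nil {
		panic(err)
	}

	// Attribute selections come back as AttributeNodes; the value is InnerText().
	for _, n := range xmlquery.Find(doc, "/a/@foo") {
		fmt.Println(n.Type == xmlquery.AttributeNode, n.InnerText()) // true bar
	}

	// CDATA selections come back as CharDataNodes, also read via InnerText().
	for _, n := range xmlquery.Find(doc, "/a/text()") {
		fmt.Println(n.Type == xmlquery.CharDataNode, n.InnerText()) // true hello
	}
}
```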
diff --git a/pkg/ottl/ottlfuncs/func_get_xml_test.go b/pkg/ottl/ottlfuncs/func_get_xml_test.go
index 26b8bfde5bc9..e9686d3696ad 100644
--- a/pkg/ottl/ottlfuncs/func_get_xml_test.go
+++ b/pkg/ottl/ottlfuncs/func_get_xml_test.go
@@ -74,22 +74,22 @@ func Test_GetXML(t *testing.T) {
want: ``,
},
{
- name: "ignore attribute selection",
+ name: "get attribute selection",
document: `<a foo="bar"></a>`,
- xPath: "/@foo",
- want: ``,
+ xPath: "/a/@foo",
+ want: `bar`,
},
{
- name: "ignore text selection",
+ name: "get text selection",
document: `<a>hello</a>`,
xPath: "/a/text()",
- want: ``,
+ want: `hello`,
},
{
- name: "ignore chardata selection",
+ name: "get chardata selection",
document: `<a><![CDATA[hello]]></a>`,
xPath: "/a/text()",
- want: ``,
+ want: `hello`,
},
}
for _, tt := range tests {
diff --git a/receiver/mongodbatlasreceiver/documentation.md b/receiver/mongodbatlasreceiver/documentation.md
index ae5120d4b7a3..251f950865ae 100644
--- a/receiver/mongodbatlasreceiver/documentation.md
+++ b/receiver/mongodbatlasreceiver/documentation.md
@@ -424,7 +424,7 @@ Aggregate of MongoDB Metrics DOCUMENT_METRICS_UPDATED, DOCUMENT_METRICS_DELETED,
DB Operation Rates
-Aggregate of MongoDB Metrics OPCOUNTER_GETMORE, OPERATIONS_SCAN_AND_ORDER, OPCOUNTER_UPDATE, OPCOUNTER_REPL_UPDATE, OPCOUNTER_CMD, OPCOUNTER_DELETE, OPCOUNTER_REPL_DELETE, OPCOUNTER_REPL_CMD, OPCOUNTER_QUERY, OPCOUNTER_REPL_INSERT, OPCOUNTER_INSERT
+Aggregate of MongoDB Metrics OPCOUNTER_GETMORE, OPERATIONS_SCAN_AND_ORDER, OPCOUNTER_UPDATE, OPCOUNTER_REPL_UPDATE, OPCOUNTER_CMD, OPCOUNTER_DELETE, OPCOUNTER_REPL_DELETE, OPCOUNTER_REPL_CMD, OPCOUNTER_QUERY, OPCOUNTER_REPL_INSERT, OPCOUNTER_INSERT, OPCOUNTER_TTL_DELETED
| Unit | Metric Type | Value Type |
| ---- | ----------- | ---------- |
@@ -434,7 +434,7 @@ Aggregate of MongoDB Metrics OPCOUNTER_GETMORE, OPERATIONS_SCAN_AND_ORDER, OPCOU
| Name | Description | Values |
| ---- | ----------- | ------ |
-| operation | Type of database operation | Str: ``cmd``, ``query``, ``update``, ``delete``, ``getmore``, ``insert``, ``scan_and_order`` |
+| operation | Type of database operation | Str: ``cmd``, ``query``, ``update``, ``delete``, ``getmore``, ``insert``, ``scan_and_order``, ``ttl_deleted`` |
| cluster_role | Whether process is acting as replica or primary | Str: ``primary``, ``replica`` |
### mongodbatlas.process.db.operations.time
@@ -933,6 +933,58 @@ Aggregate of MongoDB Metrics MAX_SWAP_USAGE_FREE, MAX_SWAP_USAGE_USED
| ---- | ----------- | ------ |
| memory_state | Memory usage type | Str: ``resident``, ``virtual``, ``mapped``, ``computed``, ``shared``, ``free``, ``used`` |
+## Optional Metrics
+
+The following metrics are not emitted by default. Each of them can be enabled by applying the following configuration:
+
+```yaml
+metrics:
+  <metric_name>:
+    enabled: true
+```
+
+### mongodbatlas.disk.partition.queue.depth
+
+Disk queue depth
+
+Aggregate of MongoDB Metrics DISK_QUEUE_DEPTH
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| 1 | Gauge | Double |
+
+### mongodbatlas.disk.partition.throughput
+
+Disk throughput
+
+Aggregate of MongoDB Metrics DISK_PARTITION_THROUGHPUT_READ, DISK_PARTITION_THROUGHPUT_WRITE
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| By/s | Gauge | Double |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| disk_direction | Measurement type for disk operation | Str: ``read``, ``write``, ``total`` |
+
+### mongodbatlas.process.cache.ratio
+
+Cache ratios represented as (%)
+
+Aggregate of MongoDB Metrics CACHE_FILL_RATIO, DIRTY_FILL_RATIO
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| % | Gauge | Double |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| cache_ratio_type | Cache ratio type | Str: ``cache_fill``, ``dirty_fill`` |
+
## Resource Attributes
| Name | Description | Values | Enabled |
diff --git a/receiver/mongodbatlasreceiver/internal/metadata/generated_config.go b/receiver/mongodbatlasreceiver/internal/metadata/generated_config.go
index 46cdab5cd211..9670ae4fe76e 100644
--- a/receiver/mongodbatlasreceiver/internal/metadata/generated_config.go
+++ b/receiver/mongodbatlasreceiver/internal/metadata/generated_config.go
@@ -34,8 +34,10 @@ type MetricsConfig struct {
MongodbatlasDiskPartitionIopsMax MetricConfig `mapstructure:"mongodbatlas.disk.partition.iops.max"`
MongodbatlasDiskPartitionLatencyAverage MetricConfig `mapstructure:"mongodbatlas.disk.partition.latency.average"`
MongodbatlasDiskPartitionLatencyMax MetricConfig `mapstructure:"mongodbatlas.disk.partition.latency.max"`
+ MongodbatlasDiskPartitionQueueDepth MetricConfig `mapstructure:"mongodbatlas.disk.partition.queue.depth"`
MongodbatlasDiskPartitionSpaceAverage MetricConfig `mapstructure:"mongodbatlas.disk.partition.space.average"`
MongodbatlasDiskPartitionSpaceMax MetricConfig `mapstructure:"mongodbatlas.disk.partition.space.max"`
+ MongodbatlasDiskPartitionThroughput MetricConfig `mapstructure:"mongodbatlas.disk.partition.throughput"`
MongodbatlasDiskPartitionUsageAverage MetricConfig `mapstructure:"mongodbatlas.disk.partition.usage.average"`
MongodbatlasDiskPartitionUsageMax MetricConfig `mapstructure:"mongodbatlas.disk.partition.usage.max"`
MongodbatlasDiskPartitionUtilizationAverage MetricConfig `mapstructure:"mongodbatlas.disk.partition.utilization.average"`
@@ -43,6 +45,7 @@ type MetricsConfig struct {
MongodbatlasProcessAsserts MetricConfig `mapstructure:"mongodbatlas.process.asserts"`
MongodbatlasProcessBackgroundFlush MetricConfig `mapstructure:"mongodbatlas.process.background_flush"`
MongodbatlasProcessCacheIo MetricConfig `mapstructure:"mongodbatlas.process.cache.io"`
+ MongodbatlasProcessCacheRatio MetricConfig `mapstructure:"mongodbatlas.process.cache.ratio"`
MongodbatlasProcessCacheSize MetricConfig `mapstructure:"mongodbatlas.process.cache.size"`
MongodbatlasProcessConnections MetricConfig `mapstructure:"mongodbatlas.process.connections"`
MongodbatlasProcessCPUChildrenNormalizedUsageAverage MetricConfig `mapstructure:"mongodbatlas.process.cpu.children.normalized.usage.average"`
@@ -112,12 +115,18 @@ func DefaultMetricsConfig() MetricsConfig {
MongodbatlasDiskPartitionLatencyMax: MetricConfig{
Enabled: true,
},
+ MongodbatlasDiskPartitionQueueDepth: MetricConfig{
+ Enabled: false,
+ },
MongodbatlasDiskPartitionSpaceAverage: MetricConfig{
Enabled: true,
},
MongodbatlasDiskPartitionSpaceMax: MetricConfig{
Enabled: true,
},
+ MongodbatlasDiskPartitionThroughput: MetricConfig{
+ Enabled: false,
+ },
MongodbatlasDiskPartitionUsageAverage: MetricConfig{
Enabled: true,
},
@@ -139,6 +148,9 @@ func DefaultMetricsConfig() MetricsConfig {
MongodbatlasProcessCacheIo: MetricConfig{
Enabled: true,
},
+ MongodbatlasProcessCacheRatio: MetricConfig{
+ Enabled: false,
+ },
MongodbatlasProcessCacheSize: MetricConfig{
Enabled: true,
},
diff --git a/receiver/mongodbatlasreceiver/internal/metadata/generated_config_test.go b/receiver/mongodbatlasreceiver/internal/metadata/generated_config_test.go
index 8c67cb277f9b..8046575b85b7 100644
--- a/receiver/mongodbatlasreceiver/internal/metadata/generated_config_test.go
+++ b/receiver/mongodbatlasreceiver/internal/metadata/generated_config_test.go
@@ -31,8 +31,10 @@ func TestMetricsBuilderConfig(t *testing.T) {
MongodbatlasDiskPartitionIopsMax: MetricConfig{Enabled: true},
MongodbatlasDiskPartitionLatencyAverage: MetricConfig{Enabled: true},
MongodbatlasDiskPartitionLatencyMax: MetricConfig{Enabled: true},
+ MongodbatlasDiskPartitionQueueDepth: MetricConfig{Enabled: true},
MongodbatlasDiskPartitionSpaceAverage: MetricConfig{Enabled: true},
MongodbatlasDiskPartitionSpaceMax: MetricConfig{Enabled: true},
+ MongodbatlasDiskPartitionThroughput: MetricConfig{Enabled: true},
MongodbatlasDiskPartitionUsageAverage: MetricConfig{Enabled: true},
MongodbatlasDiskPartitionUsageMax: MetricConfig{Enabled: true},
MongodbatlasDiskPartitionUtilizationAverage: MetricConfig{Enabled: true},
@@ -40,6 +42,7 @@ func TestMetricsBuilderConfig(t *testing.T) {
MongodbatlasProcessAsserts: MetricConfig{Enabled: true},
MongodbatlasProcessBackgroundFlush: MetricConfig{Enabled: true},
MongodbatlasProcessCacheIo: MetricConfig{Enabled: true},
+ MongodbatlasProcessCacheRatio: MetricConfig{Enabled: true},
MongodbatlasProcessCacheSize: MetricConfig{Enabled: true},
MongodbatlasProcessConnections: MetricConfig{Enabled: true},
MongodbatlasProcessCPUChildrenNormalizedUsageAverage: MetricConfig{Enabled: true},
@@ -115,8 +118,10 @@ func TestMetricsBuilderConfig(t *testing.T) {
MongodbatlasDiskPartitionIopsMax: MetricConfig{Enabled: false},
MongodbatlasDiskPartitionLatencyAverage: MetricConfig{Enabled: false},
MongodbatlasDiskPartitionLatencyMax: MetricConfig{Enabled: false},
+ MongodbatlasDiskPartitionQueueDepth: MetricConfig{Enabled: false},
MongodbatlasDiskPartitionSpaceAverage: MetricConfig{Enabled: false},
MongodbatlasDiskPartitionSpaceMax: MetricConfig{Enabled: false},
+ MongodbatlasDiskPartitionThroughput: MetricConfig{Enabled: false},
MongodbatlasDiskPartitionUsageAverage: MetricConfig{Enabled: false},
MongodbatlasDiskPartitionUsageMax: MetricConfig{Enabled: false},
MongodbatlasDiskPartitionUtilizationAverage: MetricConfig{Enabled: false},
@@ -124,6 +129,7 @@ func TestMetricsBuilderConfig(t *testing.T) {
MongodbatlasProcessAsserts: MetricConfig{Enabled: false},
MongodbatlasProcessBackgroundFlush: MetricConfig{Enabled: false},
MongodbatlasProcessCacheIo: MetricConfig{Enabled: false},
+ MongodbatlasProcessCacheRatio: MetricConfig{Enabled: false},
MongodbatlasProcessCacheSize: MetricConfig{Enabled: false},
MongodbatlasProcessConnections: MetricConfig{Enabled: false},
MongodbatlasProcessCPUChildrenNormalizedUsageAverage: MetricConfig{Enabled: false},
diff --git a/receiver/mongodbatlasreceiver/internal/metadata/generated_metrics.go b/receiver/mongodbatlasreceiver/internal/metadata/generated_metrics.go
index 1e2d40f96785..356b4f1edd78 100644
--- a/receiver/mongodbatlasreceiver/internal/metadata/generated_metrics.go
+++ b/receiver/mongodbatlasreceiver/internal/metadata/generated_metrics.go
@@ -102,6 +102,32 @@ var MapAttributeCacheDirection = map[string]AttributeCacheDirection{
"written_from": AttributeCacheDirectionWrittenFrom,
}
+// AttributeCacheRatioType specifies the value of the cache_ratio_type attribute.
+type AttributeCacheRatioType int
+
+const (
+ _ AttributeCacheRatioType = iota
+ AttributeCacheRatioTypeCacheFill
+ AttributeCacheRatioTypeDirtyFill
+)
+
+// String returns the string representation of the AttributeCacheRatioType.
+func (av AttributeCacheRatioType) String() string {
+ switch av {
+ case AttributeCacheRatioTypeCacheFill:
+ return "cache_fill"
+ case AttributeCacheRatioTypeDirtyFill:
+ return "dirty_fill"
+ }
+ return ""
+}
+
+// MapAttributeCacheRatioType is a helper map of string to AttributeCacheRatioType attribute value.
+var MapAttributeCacheRatioType = map[string]AttributeCacheRatioType{
+ "cache_fill": AttributeCacheRatioTypeCacheFill,
+ "dirty_fill": AttributeCacheRatioTypeDirtyFill,
+}
+
// AttributeCacheStatus specifies the value of the cache_status attribute.
type AttributeCacheStatus int
@@ -582,6 +608,7 @@ const (
AttributeOperationGetmore
AttributeOperationInsert
AttributeOperationScanAndOrder
+ AttributeOperationTTLDeleted
)
// String returns the string representation of the AttributeOperation.
@@ -601,6 +628,8 @@ func (av AttributeOperation) String() string {
return "insert"
case AttributeOperationScanAndOrder:
return "scan_and_order"
+ case AttributeOperationTTLDeleted:
+ return "ttl_deleted"
}
return ""
}
@@ -614,6 +643,7 @@ var MapAttributeOperation = map[string]AttributeOperation{
"getmore": AttributeOperationGetmore,
"insert": AttributeOperationInsert,
"scan_and_order": AttributeOperationScanAndOrder,
+ "ttl_deleted": AttributeOperationTTLDeleted,
}
// AttributeOplogType specifies the value of the oplog_type attribute.
@@ -1038,6 +1068,55 @@ func newMetricMongodbatlasDiskPartitionLatencyMax(cfg MetricConfig) metricMongod
return m
}
+type metricMongodbatlasDiskPartitionQueueDepth struct {
+ data pmetric.Metric // data buffer for generated metric.
+ config MetricConfig // metric config provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills mongodbatlas.disk.partition.queue.depth metric with initial data.
+func (m *metricMongodbatlasDiskPartitionQueueDepth) init() {
+ m.data.SetName("mongodbatlas.disk.partition.queue.depth")
+ m.data.SetDescription("Disk queue depth")
+ m.data.SetUnit("1")
+ m.data.SetEmptyGauge()
+}
+
+func (m *metricMongodbatlasDiskPartitionQueueDepth) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
+ if !m.config.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetDoubleValue(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricMongodbatlasDiskPartitionQueueDepth) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricMongodbatlasDiskPartitionQueueDepth) emit(metrics pmetric.MetricSlice) {
+ if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricMongodbatlasDiskPartitionQueueDepth(cfg MetricConfig) metricMongodbatlasDiskPartitionQueueDepth {
+ m := metricMongodbatlasDiskPartitionQueueDepth{config: cfg}
+ if cfg.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
type metricMongodbatlasDiskPartitionSpaceAverage struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
@@ -1140,6 +1219,57 @@ func newMetricMongodbatlasDiskPartitionSpaceMax(cfg MetricConfig) metricMongodba
return m
}
+type metricMongodbatlasDiskPartitionThroughput struct {
+ data pmetric.Metric // data buffer for generated metric.
+ config MetricConfig // metric config provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills mongodbatlas.disk.partition.throughput metric with initial data.
+func (m *metricMongodbatlasDiskPartitionThroughput) init() {
+ m.data.SetName("mongodbatlas.disk.partition.throughput")
+ m.data.SetDescription("Disk throughput")
+ m.data.SetUnit("By/s")
+ m.data.SetEmptyGauge()
+ m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricMongodbatlasDiskPartitionThroughput) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, diskDirectionAttributeValue string) {
+ if !m.config.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetDoubleValue(val)
+ dp.Attributes().PutStr("disk_direction", diskDirectionAttributeValue)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricMongodbatlasDiskPartitionThroughput) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricMongodbatlasDiskPartitionThroughput) emit(metrics pmetric.MetricSlice) {
+ if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricMongodbatlasDiskPartitionThroughput(cfg MetricConfig) metricMongodbatlasDiskPartitionThroughput {
+ m := metricMongodbatlasDiskPartitionThroughput{config: cfg}
+ if cfg.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
type metricMongodbatlasDiskPartitionUsageAverage struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
@@ -1491,6 +1621,57 @@ func newMetricMongodbatlasProcessCacheIo(cfg MetricConfig) metricMongodbatlasPro
return m
}
+type metricMongodbatlasProcessCacheRatio struct {
+ data pmetric.Metric // data buffer for generated metric.
+ config MetricConfig // metric config provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills mongodbatlas.process.cache.ratio metric with initial data.
+func (m *metricMongodbatlasProcessCacheRatio) init() {
+ m.data.SetName("mongodbatlas.process.cache.ratio")
+ m.data.SetDescription("Cache ratios represented as (%)")
+ m.data.SetUnit("%")
+ m.data.SetEmptyGauge()
+ m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricMongodbatlasProcessCacheRatio) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cacheRatioTypeAttributeValue string) {
+ if !m.config.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetDoubleValue(val)
+ dp.Attributes().PutStr("cache_ratio_type", cacheRatioTypeAttributeValue)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricMongodbatlasProcessCacheRatio) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricMongodbatlasProcessCacheRatio) emit(metrics pmetric.MetricSlice) {
+ if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricMongodbatlasProcessCacheRatio(cfg MetricConfig) metricMongodbatlasProcessCacheRatio {
+ m := metricMongodbatlasProcessCacheRatio{config: cfg}
+ if cfg.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
type metricMongodbatlasProcessCacheSize struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
@@ -3897,8 +4078,10 @@ type MetricsBuilder struct {
metricMongodbatlasDiskPartitionIopsMax metricMongodbatlasDiskPartitionIopsMax
metricMongodbatlasDiskPartitionLatencyAverage metricMongodbatlasDiskPartitionLatencyAverage
metricMongodbatlasDiskPartitionLatencyMax metricMongodbatlasDiskPartitionLatencyMax
+ metricMongodbatlasDiskPartitionQueueDepth metricMongodbatlasDiskPartitionQueueDepth
metricMongodbatlasDiskPartitionSpaceAverage metricMongodbatlasDiskPartitionSpaceAverage
metricMongodbatlasDiskPartitionSpaceMax metricMongodbatlasDiskPartitionSpaceMax
+ metricMongodbatlasDiskPartitionThroughput metricMongodbatlasDiskPartitionThroughput
metricMongodbatlasDiskPartitionUsageAverage metricMongodbatlasDiskPartitionUsageAverage
metricMongodbatlasDiskPartitionUsageMax metricMongodbatlasDiskPartitionUsageMax
metricMongodbatlasDiskPartitionUtilizationAverage metricMongodbatlasDiskPartitionUtilizationAverage
@@ -3906,6 +4089,7 @@ type MetricsBuilder struct {
metricMongodbatlasProcessAsserts metricMongodbatlasProcessAsserts
metricMongodbatlasProcessBackgroundFlush metricMongodbatlasProcessBackgroundFlush
metricMongodbatlasProcessCacheIo metricMongodbatlasProcessCacheIo
+ metricMongodbatlasProcessCacheRatio metricMongodbatlasProcessCacheRatio
metricMongodbatlasProcessCacheSize metricMongodbatlasProcessCacheSize
metricMongodbatlasProcessConnections metricMongodbatlasProcessConnections
metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage
@@ -3985,8 +4169,10 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt
metricMongodbatlasDiskPartitionIopsMax: newMetricMongodbatlasDiskPartitionIopsMax(mbc.Metrics.MongodbatlasDiskPartitionIopsMax),
metricMongodbatlasDiskPartitionLatencyAverage: newMetricMongodbatlasDiskPartitionLatencyAverage(mbc.Metrics.MongodbatlasDiskPartitionLatencyAverage),
metricMongodbatlasDiskPartitionLatencyMax: newMetricMongodbatlasDiskPartitionLatencyMax(mbc.Metrics.MongodbatlasDiskPartitionLatencyMax),
+ metricMongodbatlasDiskPartitionQueueDepth: newMetricMongodbatlasDiskPartitionQueueDepth(mbc.Metrics.MongodbatlasDiskPartitionQueueDepth),
metricMongodbatlasDiskPartitionSpaceAverage: newMetricMongodbatlasDiskPartitionSpaceAverage(mbc.Metrics.MongodbatlasDiskPartitionSpaceAverage),
metricMongodbatlasDiskPartitionSpaceMax: newMetricMongodbatlasDiskPartitionSpaceMax(mbc.Metrics.MongodbatlasDiskPartitionSpaceMax),
+ metricMongodbatlasDiskPartitionThroughput: newMetricMongodbatlasDiskPartitionThroughput(mbc.Metrics.MongodbatlasDiskPartitionThroughput),
metricMongodbatlasDiskPartitionUsageAverage: newMetricMongodbatlasDiskPartitionUsageAverage(mbc.Metrics.MongodbatlasDiskPartitionUsageAverage),
metricMongodbatlasDiskPartitionUsageMax: newMetricMongodbatlasDiskPartitionUsageMax(mbc.Metrics.MongodbatlasDiskPartitionUsageMax),
metricMongodbatlasDiskPartitionUtilizationAverage: newMetricMongodbatlasDiskPartitionUtilizationAverage(mbc.Metrics.MongodbatlasDiskPartitionUtilizationAverage),
@@ -3994,6 +4180,7 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt
metricMongodbatlasProcessAsserts: newMetricMongodbatlasProcessAsserts(mbc.Metrics.MongodbatlasProcessAsserts),
metricMongodbatlasProcessBackgroundFlush: newMetricMongodbatlasProcessBackgroundFlush(mbc.Metrics.MongodbatlasProcessBackgroundFlush),
metricMongodbatlasProcessCacheIo: newMetricMongodbatlasProcessCacheIo(mbc.Metrics.MongodbatlasProcessCacheIo),
+ metricMongodbatlasProcessCacheRatio: newMetricMongodbatlasProcessCacheRatio(mbc.Metrics.MongodbatlasProcessCacheRatio),
metricMongodbatlasProcessCacheSize: newMetricMongodbatlasProcessCacheSize(mbc.Metrics.MongodbatlasProcessCacheSize),
metricMongodbatlasProcessConnections: newMetricMongodbatlasProcessConnections(mbc.Metrics.MongodbatlasProcessConnections),
metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage: newMetricMongodbatlasProcessCPUChildrenNormalizedUsageAverage(mbc.Metrics.MongodbatlasProcessCPUChildrenNormalizedUsageAverage),
@@ -4197,8 +4384,10 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) {
mb.metricMongodbatlasDiskPartitionIopsMax.emit(ils.Metrics())
mb.metricMongodbatlasDiskPartitionLatencyAverage.emit(ils.Metrics())
mb.metricMongodbatlasDiskPartitionLatencyMax.emit(ils.Metrics())
+ mb.metricMongodbatlasDiskPartitionQueueDepth.emit(ils.Metrics())
mb.metricMongodbatlasDiskPartitionSpaceAverage.emit(ils.Metrics())
mb.metricMongodbatlasDiskPartitionSpaceMax.emit(ils.Metrics())
+ mb.metricMongodbatlasDiskPartitionThroughput.emit(ils.Metrics())
mb.metricMongodbatlasDiskPartitionUsageAverage.emit(ils.Metrics())
mb.metricMongodbatlasDiskPartitionUsageMax.emit(ils.Metrics())
mb.metricMongodbatlasDiskPartitionUtilizationAverage.emit(ils.Metrics())
@@ -4206,6 +4395,7 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) {
mb.metricMongodbatlasProcessAsserts.emit(ils.Metrics())
mb.metricMongodbatlasProcessBackgroundFlush.emit(ils.Metrics())
mb.metricMongodbatlasProcessCacheIo.emit(ils.Metrics())
+ mb.metricMongodbatlasProcessCacheRatio.emit(ils.Metrics())
mb.metricMongodbatlasProcessCacheSize.emit(ils.Metrics())
mb.metricMongodbatlasProcessConnections.emit(ils.Metrics())
mb.metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage.emit(ils.Metrics())
@@ -4314,6 +4504,11 @@ func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionLatencyMaxDataPoint(ts
mb.metricMongodbatlasDiskPartitionLatencyMax.recordDataPoint(mb.startTime, ts, val, diskDirectionAttributeValue.String())
}
+// RecordMongodbatlasDiskPartitionQueueDepthDataPoint adds a data point to mongodbatlas.disk.partition.queue.depth metric.
+func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionQueueDepthDataPoint(ts pcommon.Timestamp, val float64) {
+ mb.metricMongodbatlasDiskPartitionQueueDepth.recordDataPoint(mb.startTime, ts, val)
+}
+
// RecordMongodbatlasDiskPartitionSpaceAverageDataPoint adds a data point to mongodbatlas.disk.partition.space.average metric.
func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionSpaceAverageDataPoint(ts pcommon.Timestamp, val float64, diskStatusAttributeValue AttributeDiskStatus) {
mb.metricMongodbatlasDiskPartitionSpaceAverage.recordDataPoint(mb.startTime, ts, val, diskStatusAttributeValue.String())
@@ -4324,6 +4519,11 @@ func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionSpaceMaxDataPoint(ts pc
mb.metricMongodbatlasDiskPartitionSpaceMax.recordDataPoint(mb.startTime, ts, val, diskStatusAttributeValue.String())
}
+// RecordMongodbatlasDiskPartitionThroughputDataPoint adds a data point to mongodbatlas.disk.partition.throughput metric.
+func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionThroughputDataPoint(ts pcommon.Timestamp, val float64, diskDirectionAttributeValue AttributeDiskDirection) {
+ mb.metricMongodbatlasDiskPartitionThroughput.recordDataPoint(mb.startTime, ts, val, diskDirectionAttributeValue.String())
+}
+
// RecordMongodbatlasDiskPartitionUsageAverageDataPoint adds a data point to mongodbatlas.disk.partition.usage.average metric.
func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionUsageAverageDataPoint(ts pcommon.Timestamp, val float64, diskStatusAttributeValue AttributeDiskStatus) {
mb.metricMongodbatlasDiskPartitionUsageAverage.recordDataPoint(mb.startTime, ts, val, diskStatusAttributeValue.String())
@@ -4359,6 +4559,11 @@ func (mb *MetricsBuilder) RecordMongodbatlasProcessCacheIoDataPoint(ts pcommon.T
mb.metricMongodbatlasProcessCacheIo.recordDataPoint(mb.startTime, ts, val, cacheDirectionAttributeValue.String())
}
+// RecordMongodbatlasProcessCacheRatioDataPoint adds a data point to mongodbatlas.process.cache.ratio metric.
+func (mb *MetricsBuilder) RecordMongodbatlasProcessCacheRatioDataPoint(ts pcommon.Timestamp, val float64, cacheRatioTypeAttributeValue AttributeCacheRatioType) {
+ mb.metricMongodbatlasProcessCacheRatio.recordDataPoint(mb.startTime, ts, val, cacheRatioTypeAttributeValue.String())
+}
+
// RecordMongodbatlasProcessCacheSizeDataPoint adds a data point to mongodbatlas.process.cache.size metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessCacheSizeDataPoint(ts pcommon.Timestamp, val float64, cacheStatusAttributeValue AttributeCacheStatus) {
mb.metricMongodbatlasProcessCacheSize.recordDataPoint(mb.startTime, ts, val, cacheStatusAttributeValue.String())
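All of the generated attribute enums in this file follow the same shape: a typed int whose zero value is reserved for "unset", a `String()` method, and a string-to-value map used when parsing incoming attribute strings. A self-contained sketch of the round-trip, restating the `AttributeCacheRatioType` definitions from this hunk:

```go
package main

import "fmt"

type AttributeCacheRatioType int

const (
	_ AttributeCacheRatioType = iota // zero value reserved: String() returns ""
	AttributeCacheRatioTypeCacheFill
	AttributeCacheRatioTypeDirtyFill
)

func (av AttributeCacheRatioType) String() string {
	switch av {
	case AttributeCacheRatioTypeCacheFill:
		return "cache_fill"
	case AttributeCacheRatioTypeDirtyFill:
		return "dirty_fill"
	}
	return ""
}

var MapAttributeCacheRatioType = map[string]AttributeCacheRatioType{
	"cache_fill": AttributeCacheRatioTypeCacheFill,
	"dirty_fill": AttributeCacheRatioTypeDirtyFill,
}

func main() {
	v, ok := MapAttributeCacheRatioType["cache_fill"]
	fmt.Println(ok, v.String()) // true cache_fill
	_, ok = MapAttributeCacheRatioType["bogus"]
	fmt.Println(ok) // false: unknown strings are detectable
}
```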
diff --git a/receiver/mongodbatlasreceiver/internal/metadata/generated_metrics_test.go b/receiver/mongodbatlasreceiver/internal/metadata/generated_metrics_test.go
index 6a8d5c246a0e..81ef8e032756 100644
--- a/receiver/mongodbatlasreceiver/internal/metadata/generated_metrics_test.go
+++ b/receiver/mongodbatlasreceiver/internal/metadata/generated_metrics_test.go
@@ -92,6 +92,9 @@ func TestMetricsBuilder(t *testing.T) {
allMetricsCount++
mb.RecordMongodbatlasDiskPartitionLatencyMaxDataPoint(ts, 1, AttributeDiskDirectionRead)
+ allMetricsCount++
+ mb.RecordMongodbatlasDiskPartitionQueueDepthDataPoint(ts, 1)
+
defaultMetricsCount++
allMetricsCount++
mb.RecordMongodbatlasDiskPartitionSpaceAverageDataPoint(ts, 1, AttributeDiskStatusFree)
@@ -100,6 +103,9 @@ func TestMetricsBuilder(t *testing.T) {
allMetricsCount++
mb.RecordMongodbatlasDiskPartitionSpaceMaxDataPoint(ts, 1, AttributeDiskStatusFree)
+ allMetricsCount++
+ mb.RecordMongodbatlasDiskPartitionThroughputDataPoint(ts, 1, AttributeDiskDirectionRead)
+
defaultMetricsCount++
allMetricsCount++
mb.RecordMongodbatlasDiskPartitionUsageAverageDataPoint(ts, 1, AttributeDiskStatusFree)
@@ -128,6 +134,9 @@ func TestMetricsBuilder(t *testing.T) {
allMetricsCount++
mb.RecordMongodbatlasProcessCacheIoDataPoint(ts, 1, AttributeCacheDirectionReadInto)
+ allMetricsCount++
+ mb.RecordMongodbatlasProcessCacheRatioDataPoint(ts, 1, AttributeCacheRatioTypeCacheFill)
+
defaultMetricsCount++
allMetricsCount++
mb.RecordMongodbatlasProcessCacheSizeDataPoint(ts, 1, AttributeCacheStatusDirty)
@@ -442,6 +451,18 @@ func TestMetricsBuilder(t *testing.T) {
attrVal, ok := dp.Attributes().Get("disk_direction")
assert.True(t, ok)
assert.EqualValues(t, "read", attrVal.Str())
+ case "mongodbatlas.disk.partition.queue.depth":
+ assert.False(t, validatedMetrics["mongodbatlas.disk.partition.queue.depth"], "Found a duplicate in the metrics slice: mongodbatlas.disk.partition.queue.depth")
+ validatedMetrics["mongodbatlas.disk.partition.queue.depth"] = true
+ assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type())
+ assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len())
+ assert.Equal(t, "Disk queue depth", ms.At(i).Description())
+ assert.Equal(t, "1", ms.At(i).Unit())
+ dp := ms.At(i).Gauge().DataPoints().At(0)
+ assert.Equal(t, start, dp.StartTimestamp())
+ assert.Equal(t, ts, dp.Timestamp())
+ assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
+ assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
case "mongodbatlas.disk.partition.space.average":
assert.False(t, validatedMetrics["mongodbatlas.disk.partition.space.average"], "Found a duplicate in the metrics slice: mongodbatlas.disk.partition.space.average")
validatedMetrics["mongodbatlas.disk.partition.space.average"] = true
@@ -472,6 +493,21 @@ func TestMetricsBuilder(t *testing.T) {
attrVal, ok := dp.Attributes().Get("disk_status")
assert.True(t, ok)
assert.EqualValues(t, "free", attrVal.Str())
+ case "mongodbatlas.disk.partition.throughput":
+ assert.False(t, validatedMetrics["mongodbatlas.disk.partition.throughput"], "Found a duplicate in the metrics slice: mongodbatlas.disk.partition.throughput")
+ validatedMetrics["mongodbatlas.disk.partition.throughput"] = true
+ assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type())
+ assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len())
+ assert.Equal(t, "Disk throughput", ms.At(i).Description())
+ assert.Equal(t, "By/s", ms.At(i).Unit())
+ dp := ms.At(i).Gauge().DataPoints().At(0)
+ assert.Equal(t, start, dp.StartTimestamp())
+ assert.Equal(t, ts, dp.Timestamp())
+ assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
+ assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+ attrVal, ok := dp.Attributes().Get("disk_direction")
+ assert.True(t, ok)
+ assert.EqualValues(t, "read", attrVal.Str())
case "mongodbatlas.disk.partition.usage.average":
assert.False(t, validatedMetrics["mongodbatlas.disk.partition.usage.average"], "Found a duplicate in the metrics slice: mongodbatlas.disk.partition.usage.average")
validatedMetrics["mongodbatlas.disk.partition.usage.average"] = true
@@ -568,6 +604,21 @@ func TestMetricsBuilder(t *testing.T) {
attrVal, ok := dp.Attributes().Get("cache_direction")
assert.True(t, ok)
assert.EqualValues(t, "read_into", attrVal.Str())
+ case "mongodbatlas.process.cache.ratio":
+ assert.False(t, validatedMetrics["mongodbatlas.process.cache.ratio"], "Found a duplicate in the metrics slice: mongodbatlas.process.cache.ratio")
+ validatedMetrics["mongodbatlas.process.cache.ratio"] = true
+ assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type())
+ assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len())
+ assert.Equal(t, "Cache ratios represented as (%)", ms.At(i).Description())
+ assert.Equal(t, "%", ms.At(i).Unit())
+ dp := ms.At(i).Gauge().DataPoints().At(0)
+ assert.Equal(t, start, dp.StartTimestamp())
+ assert.Equal(t, ts, dp.Timestamp())
+ assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
+ assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+ attrVal, ok := dp.Attributes().Get("cache_ratio_type")
+ assert.True(t, ok)
+ assert.EqualValues(t, "cache_fill", attrVal.Str())
case "mongodbatlas.process.cache.size":
assert.False(t, validatedMetrics["mongodbatlas.process.cache.size"], "Found a duplicate in the metrics slice: mongodbatlas.process.cache.size")
validatedMetrics["mongodbatlas.process.cache.size"] = true
diff --git a/receiver/mongodbatlasreceiver/internal/metadata/metric_name_mapping.go b/receiver/mongodbatlasreceiver/internal/metadata/metric_name_mapping.go
index e8793b609b3c..6ddb5bd840d8 100644
--- a/receiver/mongodbatlasreceiver/internal/metadata/metric_name_mapping.go
+++ b/receiver/mongodbatlasreceiver/internal/metadata/metric_name_mapping.go
@@ -152,6 +152,16 @@ func getRecordFunc(metricName string) metricRecordFunc {
mb.RecordMongodbatlasProcessCacheSizeDataPoint(ts, float64(*dp.Value), AttributeCacheStatusUsed)
}
+ case "CACHE_FILL_RATIO":
+ return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
+ mb.RecordMongodbatlasProcessCacheRatioDataPoint(ts, float64(*dp.Value), AttributeCacheRatioTypeCacheFill)
+ }
+
+ case "DIRTY_FILL_RATIO":
+ return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
+ mb.RecordMongodbatlasProcessCacheRatioDataPoint(ts, float64(*dp.Value), AttributeCacheRatioTypeDirtyFill)
+ }
+
case "TICKETS_AVAILABLE_READS":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessTicketsDataPoint(ts, float64(*dp.Value), AttributeTicketTypeAvailableReads)
@@ -337,6 +347,10 @@ func getRecordFunc(metricName string) metricRecordFunc {
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperationInsert, AttributeClusterRolePrimary)
}
+ case "OPCOUNTER_TTL_DELETED":
+ return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
+ mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperationTTLDeleted, AttributeClusterRolePrimary)
+ }
// Rate of database operations on MongoDB secondaries found in the opcountersRepl document that the serverStatus command collects.
case "OPCOUNTER_REPL_CMD":
@@ -735,6 +749,29 @@ func getRecordFunc(metricName string) metricRecordFunc {
mb.RecordMongodbatlasDiskPartitionIopsMaxDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionTotal)
}
+ // Measures throughput of data read and written to the disk partition (not cache) used by MongoDB.
+ case "DISK_PARTITION_THROUGHPUT_READ":
+ return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
+ mb.RecordMongodbatlasDiskPartitionThroughputDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionRead)
+ }
+
+ case "DISK_PARTITION_THROUGHPUT_WRITE":
+ return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
+ mb.RecordMongodbatlasDiskPartitionThroughputDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionWrite)
+ }
+
+ // This is a calculated metric that is the sum of the read and write throughput.
+ case "DISK_PARTITION_THROUGHPUT_TOTAL":
+ return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
+ mb.RecordMongodbatlasDiskPartitionThroughputDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionTotal)
+ }
+
+ // Measures the queue depth of the disk partition used by MongoDB.
+ case "DISK_QUEUE_DEPTH":
+ return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
+ mb.RecordMongodbatlasDiskPartitionQueueDepthDataPoint(ts, float64(*dp.Value))
+ }
+
// Measures latency per operation type of the disk partition used by MongoDB.
case "DISK_PARTITION_LATENCY_READ":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
@@ -849,7 +886,7 @@ func getRecordFunc(metricName string) metricRecordFunc {
}
}
-func MeasurementsToMetric(mb *MetricsBuilder, meas *mongodbatlas.Measurements, _ bool) error {
+func MeasurementsToMetric(mb *MetricsBuilder, meas *mongodbatlas.Measurements) error {
recordFunc := getRecordFunc(meas.Name)
if recordFunc == nil {
return nil
diff --git a/receiver/mongodbatlasreceiver/internal/metadata/testdata/config.yaml b/receiver/mongodbatlasreceiver/internal/metadata/testdata/config.yaml
index cba17b3284e7..6a88579926fc 100644
--- a/receiver/mongodbatlasreceiver/internal/metadata/testdata/config.yaml
+++ b/receiver/mongodbatlasreceiver/internal/metadata/testdata/config.yaml
@@ -13,10 +13,14 @@ all_set:
enabled: true
mongodbatlas.disk.partition.latency.max:
enabled: true
+ mongodbatlas.disk.partition.queue.depth:
+ enabled: true
mongodbatlas.disk.partition.space.average:
enabled: true
mongodbatlas.disk.partition.space.max:
enabled: true
+ mongodbatlas.disk.partition.throughput:
+ enabled: true
mongodbatlas.disk.partition.usage.average:
enabled: true
mongodbatlas.disk.partition.usage.max:
@@ -31,6 +35,8 @@ all_set:
enabled: true
mongodbatlas.process.cache.io:
enabled: true
+ mongodbatlas.process.cache.ratio:
+ enabled: true
mongodbatlas.process.cache.size:
enabled: true
mongodbatlas.process.connections:
@@ -166,10 +172,14 @@ none_set:
enabled: false
mongodbatlas.disk.partition.latency.max:
enabled: false
+ mongodbatlas.disk.partition.queue.depth:
+ enabled: false
mongodbatlas.disk.partition.space.average:
enabled: false
mongodbatlas.disk.partition.space.max:
enabled: false
+ mongodbatlas.disk.partition.throughput:
+ enabled: false
mongodbatlas.disk.partition.usage.average:
enabled: false
mongodbatlas.disk.partition.usage.max:
@@ -184,6 +194,8 @@ none_set:
enabled: false
mongodbatlas.process.cache.io:
enabled: false
+ mongodbatlas.process.cache.ratio:
+ enabled: false
mongodbatlas.process.cache.size:
enabled: false
mongodbatlas.process.connections:
diff --git a/receiver/mongodbatlasreceiver/internal/metric_conversion.go b/receiver/mongodbatlasreceiver/internal/metric_conversion.go
index b1c5639d6660..1b5b156d1c2f 100644
--- a/receiver/mongodbatlasreceiver/internal/metric_conversion.go
+++ b/receiver/mongodbatlasreceiver/internal/metric_conversion.go
@@ -19,15 +19,79 @@ func processMeasurements(
var errs error
for _, meas := range measurements {
- err := metadata.MeasurementsToMetric(mb, meas, false)
+ err := metadata.MeasurementsToMetric(mb, meas)
if err != nil {
errs = multierr.Append(errs, err)
}
}
+ err := calculateTotalMetrics(mb, measurements)
+ if err != nil {
+ errs = multierr.Append(errs, err)
+ }
if errs != nil {
return fmt.Errorf("errors occurred while processing measurements: %w", errs)
}
return nil
}
+
+func calculateTotalMetrics(
+ mb *metadata.MetricsBuilder,
+ measurements []*mongodbatlas.Measurements,
+) error {
+ var err error
+ dptTotalMeasCombined := false
+ var dptTotalMeas *mongodbatlas.Measurements
+
+ for _, meas := range measurements {
+ switch meas.Name {
+ case "DISK_PARTITION_THROUGHPUT_READ", "DISK_PARTITION_THROUGHPUT_WRITE":
+ if dptTotalMeas == nil {
+ dptTotalMeas = cloneMeasurement(meas)
+ dptTotalMeas.Name = "DISK_PARTITION_THROUGHPUT_TOTAL"
+ continue
+ }
+
+ // Combine data point values with matching timestamps
+ for j, totalMeas := range dptTotalMeas.DataPoints {
+ if totalMeas.Timestamp != meas.DataPoints[j].Timestamp ||
+ (totalMeas.Value == nil && meas.DataPoints[j].Value == nil) {
+ continue
+ }
+ if totalMeas.Value == nil {
+ totalMeas.Value = new(float32)
+ }
+				var addValue float32
+				if meas.DataPoints[j].Value != nil {
+					addValue = *meas.DataPoints[j].Value
+				}
+ *totalMeas.Value += addValue
+ dptTotalMeasCombined = true
+ }
+ default:
+ }
+ }
+
+ if dptTotalMeasCombined {
+ err = metadata.MeasurementsToMetric(mb, dptTotalMeas)
+ }
+ return err
+}
+
+func cloneMeasurement(meas *mongodbatlas.Measurements) *mongodbatlas.Measurements {
+ clone := &mongodbatlas.Measurements{
+ Name: meas.Name,
+ Units: meas.Units,
+ DataPoints: make([]*mongodbatlas.DataPoints, len(meas.DataPoints)),
+ }
+
+ for i, dp := range meas.DataPoints {
+ if dp != nil {
+ newDP := *dp
+ clone.DataPoints[i] = &newDP
+ }
+ }
+
+ return clone
+}
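The timestamp-matched sum in `calculateTotalMetrics` treats a nil data-point value on one side as zero and leaves the total nil only when both sides are nil. A small sketch of that combine rule in isolation (hypothetical `combine` helper; go-mongodbatlas data points carry `*float32` values, which is why the nil handling matters):

```go
package main

import "fmt"

// combine applies the rule used for one pair of data points with matching
// timestamps: nil+nil stays nil, otherwise a missing side counts as zero.
func combine(total, next *float32) *float32 {
	if total == nil && next == nil {
		return nil
	}
	if total == nil {
		total = new(float32)
	}
	if next != nil {
		*total += *next
	}
	return total
}

func main() {
	f := func(v float32) *float32 { return &v }
	fmt.Println(*combine(f(1.5), f(2.5))) // 4
	fmt.Println(*combine(nil, f(2.5)))    // 2.5
	fmt.Println(combine(nil, nil))        // <nil>
}
```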
diff --git a/receiver/mongodbatlasreceiver/metadata.yaml b/receiver/mongodbatlasreceiver/metadata.yaml
index 74e8b97e1d78..d2c84af480d0 100644
--- a/receiver/mongodbatlasreceiver/metadata.yaml
+++ b/receiver/mongodbatlasreceiver/metadata.yaml
@@ -90,6 +90,12 @@ attributes:
enum:
- read_into
- written_from
+ cache_ratio_type:
+ description: Cache ratio type
+ type: string
+ enum:
+ - cache_fill
+ - dirty_fill
cache_status:
description: Cache status
type: string
@@ -165,6 +171,7 @@ attributes:
- getmore
- insert
- scan_and_order
+ - ttl_deleted
cluster_role:
description: Whether process is acting as replica or primary
type: string
@@ -258,6 +265,14 @@ metrics:
attributes: [cache_direction]
gauge:
value_type: double
+ mongodbatlas.process.cache.ratio:
+ enabled: false
+ description: Cache ratios represented as (%)
+ extended_documentation: Aggregate of MongoDB Metrics CACHE_FILL_RATIO, DIRTY_FILL_RATIO
+ unit: "%"
+ attributes: [cache_ratio_type]
+ gauge:
+ value_type: double
mongodbatlas.process.cache.size:
enabled: true
description: Cache sizes
@@ -452,7 +467,7 @@ metrics:
mongodbatlas.process.db.operations.rate:
enabled: true
description: DB Operation Rates
- extended_documentation: Aggregate of MongoDB Metrics OPCOUNTER_GETMORE, OPERATIONS_SCAN_AND_ORDER, OPCOUNTER_UPDATE, OPCOUNTER_REPL_UPDATE, OPCOUNTER_CMD, OPCOUNTER_DELETE, OPCOUNTER_REPL_DELETE, OPCOUNTER_REPL_CMD, OPCOUNTER_QUERY, OPCOUNTER_REPL_INSERT, OPCOUNTER_INSERT
+ extended_documentation: Aggregate of MongoDB Metrics OPCOUNTER_GETMORE, OPERATIONS_SCAN_AND_ORDER, OPCOUNTER_UPDATE, OPCOUNTER_REPL_UPDATE, OPCOUNTER_CMD, OPCOUNTER_DELETE, OPCOUNTER_REPL_DELETE, OPCOUNTER_REPL_CMD, OPCOUNTER_QUERY, OPCOUNTER_REPL_INSERT, OPCOUNTER_INSERT, OPCOUNTER_TTL_DELETED
unit: "{operations}/s"
attributes: [operation, cluster_role]
gauge:
@@ -618,6 +633,14 @@ metrics:
attributes: [disk_direction]
gauge:
value_type: double
+ mongodbatlas.disk.partition.throughput:
+ enabled: false
+ description: Disk throughput
+ extended_documentation: Aggregate of MongoDB Metrics DISK_PARTITION_THROUGHPUT_READ, DISK_PARTITION_THROUGHPUT_WRITE
+ unit: By/s
+ attributes: [disk_direction]
+ gauge:
+ value_type: double
mongodbatlas.disk.partition.usage.max:
enabled: true
description: Disk partition usage (%)
@@ -680,6 +703,14 @@ metrics:
attributes: [disk_status]
gauge:
value_type: double
+ mongodbatlas.disk.partition.queue.depth:
+ enabled: false
+ description: Disk queue depth
+ extended_documentation: Aggregate of MongoDB Metrics DISK_QUEUE_DEPTH
+ unit: "1"
+ attributes: []
+ gauge:
+ value_type: double
mongodbatlas.db.size:
enabled: true
description: Database feature size
diff --git a/receiver/postgresqlreceiver/client.go b/receiver/postgresqlreceiver/client.go
index dc0029873a10..6f02ddc63e71 100644
--- a/receiver/postgresqlreceiver/client.go
+++ b/receiver/postgresqlreceiver/client.go
@@ -9,6 +9,7 @@ import (
"errors"
"fmt"
"net"
+ "strconv"
"strings"
"time"
@@ -56,6 +57,7 @@ type client interface {
getMaxConnections(ctx context.Context) (int64, error)
getIndexStats(ctx context.Context, database string) (map[indexIdentifer]indexStat, error)
listDatabases(ctx context.Context) ([]string, error)
+ getVersion(ctx context.Context) (string, error)
}
type postgreSQLClient struct {
@@ -442,7 +444,6 @@ type bgStat struct {
checkpointWriteTime float64
checkpointSyncTime float64
bgWrites int64
- backendWrites int64
bufferBackendWrites int64
bufferFsyncWrites int64
bufferCheckpoints int64
@@ -451,54 +452,105 @@ type bgStat struct {
}
func (c *postgreSQLClient) getBGWriterStats(ctx context.Context) (*bgStat, error) {
- query := `SELECT
- checkpoints_req AS checkpoint_req,
- checkpoints_timed AS checkpoint_scheduled,
- checkpoint_write_time AS checkpoint_duration_write,
- checkpoint_sync_time AS checkpoint_duration_sync,
- buffers_clean AS bg_writes,
- buffers_backend AS backend_writes,
- buffers_backend_fsync AS buffers_written_fsync,
- buffers_checkpoint AS buffers_checkpoints,
- buffers_alloc AS buffers_allocated,
- maxwritten_clean AS maxwritten_count
- FROM pg_stat_bgwriter;`
+ version, err := c.getVersion(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ major, err := parseMajorVersion(version)
+ if err != nil {
+ return nil, err
+ }
- row := c.client.QueryRowContext(ctx, query)
var (
checkpointsReq, checkpointsScheduled int64
checkpointSyncTime, checkpointWriteTime float64
bgWrites, bufferCheckpoints, bufferAllocated int64
bufferBackendWrites, bufferFsyncWrites, maxWritten int64
)
- err := row.Scan(
- &checkpointsReq,
- &checkpointsScheduled,
- &checkpointWriteTime,
- &checkpointSyncTime,
- &bgWrites,
- &bufferBackendWrites,
- &bufferFsyncWrites,
- &bufferCheckpoints,
- &bufferAllocated,
- &maxWritten,
- )
- if err != nil {
- return nil, err
+
+ if major < 17 {
+ query := `SELECT
+ checkpoints_req AS checkpoint_req,
+ checkpoints_timed AS checkpoint_scheduled,
+ checkpoint_write_time AS checkpoint_duration_write,
+ checkpoint_sync_time AS checkpoint_duration_sync,
+ buffers_clean AS bg_writes,
+ buffers_backend AS backend_writes,
+ buffers_backend_fsync AS buffers_written_fsync,
+ buffers_checkpoint AS buffers_checkpoints,
+ buffers_alloc AS buffers_allocated,
+ maxwritten_clean AS maxwritten_count
+ FROM pg_stat_bgwriter;`
+
+ row := c.client.QueryRowContext(ctx, query)
+
+ if err = row.Scan(
+ &checkpointsReq,
+ &checkpointsScheduled,
+ &checkpointWriteTime,
+ &checkpointSyncTime,
+ &bgWrites,
+ &bufferBackendWrites,
+ &bufferFsyncWrites,
+ &bufferCheckpoints,
+ &bufferAllocated,
+ &maxWritten,
+ ); err != nil {
+ return nil, err
+ }
+ return &bgStat{
+ checkpointsReq: checkpointsReq,
+ checkpointsScheduled: checkpointsScheduled,
+ checkpointWriteTime: checkpointWriteTime,
+ checkpointSyncTime: checkpointSyncTime,
+ bgWrites: bgWrites,
+ bufferBackendWrites: bufferBackendWrites,
+ bufferFsyncWrites: bufferFsyncWrites,
+ bufferCheckpoints: bufferCheckpoints,
+ buffersAllocated: bufferAllocated,
+ maxWritten: maxWritten,
+ }, nil
+ } else {
+ query := `SELECT
+ cp.num_requested AS checkpoint_req,
+ cp.num_timed AS checkpoint_scheduled,
+ cp.write_time AS checkpoint_duration_write,
+ cp.sync_time AS checkpoint_duration_sync,
+ cp.buffers_written AS buffers_checkpoints,
+ bg.buffers_clean AS bg_writes,
+ bg.buffers_alloc AS buffers_allocated,
+ bg.maxwritten_clean AS maxwritten_count
+ FROM pg_stat_bgwriter bg, pg_stat_checkpointer cp;`
+
+ row := c.client.QueryRowContext(ctx, query)
+
+ if err = row.Scan(
+ &checkpointsReq,
+ &checkpointsScheduled,
+ &checkpointWriteTime,
+ &checkpointSyncTime,
+ &bufferCheckpoints,
+ &bgWrites,
+ &bufferAllocated,
+ &maxWritten,
+ ); err != nil {
+ return nil, err
+ }
+
+ return &bgStat{
+ checkpointsReq: checkpointsReq,
+ checkpointsScheduled: checkpointsScheduled,
+ checkpointWriteTime: checkpointWriteTime,
+ checkpointSyncTime: checkpointSyncTime,
+ bgWrites: bgWrites,
+			bufferBackendWrites:  -1, // Removed from pg_stat_bgwriter in PostgreSQL 17
+			bufferFsyncWrites:    -1, // Removed from pg_stat_bgwriter in PostgreSQL 17
+ bufferCheckpoints: bufferCheckpoints,
+ buffersAllocated: bufferAllocated,
+ maxWritten: maxWritten,
+ }, nil
}
- return &bgStat{
- checkpointsReq: checkpointsReq,
- checkpointsScheduled: checkpointsScheduled,
- checkpointWriteTime: checkpointWriteTime,
- checkpointSyncTime: checkpointSyncTime,
- bgWrites: bgWrites,
- backendWrites: bufferBackendWrites,
- bufferBackendWrites: bufferBackendWrites,
- bufferFsyncWrites: bufferFsyncWrites,
- bufferCheckpoints: bufferCheckpoints,
- buffersAllocated: bufferAllocated,
- maxWritten: maxWritten,
- }, nil
}
func (c *postgreSQLClient) getMaxConnections(ctx context.Context) (int64, error) {
@@ -641,6 +693,23 @@ func (c *postgreSQLClient) listDatabases(ctx context.Context) ([]string, error)
return databases, nil
}
+func (c *postgreSQLClient) getVersion(ctx context.Context) (string, error) {
+ query := "SHOW server_version;"
+ row := c.client.QueryRowContext(ctx, query)
+ var version string
+ err := row.Scan(&version)
+ return version, err
+}
+
+func parseMajorVersion(ver string) (int, error) {
+ parts := strings.Split(ver, ".")
+ if len(parts) < 2 {
+ return 0, fmt.Errorf("unexpected version string: %s", ver)
+ }
+
+ return strconv.Atoi(parts[0])
+}
+
func filterQueryByDatabases(baseQuery string, databases []string, groupBy bool) string {
if len(databases) > 0 {
var queryDatabases []string
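The version gate above keys off only the major component: `SHOW server_version` returns strings like "13.18" or "17.2" (exact formatting can vary by build), and `parseMajorVersion` parses everything before the first dot. A quick standalone check of that behavior, restating the helper from this hunk:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseMajorVersion requires at least "major.minor" and parses the major part.
func parseMajorVersion(ver string) (int, error) {
	parts := strings.Split(ver, ".")
	if len(parts) < 2 {
		return 0, fmt.Errorf("unexpected version string: %s", ver)
	}
	return strconv.Atoi(parts[0])
}

func main() {
	for _, v := range []string{"13.18", "17.2", "17"} {
		major, err := parseMajorVersion(v)
		fmt.Println(major, err) // 13 <nil>; 17 <nil>; 0 unexpected version string: 17
	}
}
```

Note that a bare major version such as "17" is rejected, so a server reporting a version string without a dot would surface an error from the scraper.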
diff --git a/receiver/postgresqlreceiver/integration_test.go b/receiver/postgresqlreceiver/integration_test.go
index fb290be1095c..a95199536142 100644
--- a/receiver/postgresqlreceiver/integration_test.go
+++ b/receiver/postgresqlreceiver/integration_test.go
@@ -6,6 +6,7 @@
package postgresqlreceiver
import (
+ "fmt"
"net"
"path/filepath"
"testing"
@@ -22,37 +23,44 @@ import (
const postgresqlPort = "5432"
+const (
+ pre17TestVersion = "13.18"
+ post17TestVersion = "17.2"
+)
+
func TestIntegration(t *testing.T) {
defer testutil.SetFeatureGateForTest(t, separateSchemaAttrGate, false)()
defer testutil.SetFeatureGateForTest(t, connectionPoolGate, false)()
- t.Run("single_db", integrationTest("single_db", []string{"otel"}))
- t.Run("multi_db", integrationTest("multi_db", []string{"otel", "otel2"}))
- t.Run("all_db", integrationTest("all_db", []string{}))
+ t.Run("single_db", integrationTest("single_db", []string{"otel"}, pre17TestVersion))
+ t.Run("multi_db", integrationTest("multi_db", []string{"otel", "otel2"}, pre17TestVersion))
+ t.Run("all_db", integrationTest("all_db", []string{}, pre17TestVersion))
+
+ t.Run("single_db_post17", integrationTest("single_db_post17", []string{"otel"}, post17TestVersion))
}
func TestIntegrationWithSeparateSchemaAttr(t *testing.T) {
defer testutil.SetFeatureGateForTest(t, separateSchemaAttrGate, true)()
defer testutil.SetFeatureGateForTest(t, connectionPoolGate, false)()
- t.Run("single_db_schemaattr", integrationTest("single_db_schemaattr", []string{"otel"}))
- t.Run("multi_db_schemaattr", integrationTest("multi_db_schemaattr", []string{"otel", "otel2"}))
- t.Run("all_db_schemaattr", integrationTest("all_db_schemaattr", []string{}))
+ t.Run("single_db_schemaattr", integrationTest("single_db_schemaattr", []string{"otel"}, pre17TestVersion))
+ t.Run("multi_db_schemaattr", integrationTest("multi_db_schemaattr", []string{"otel", "otel2"}, pre17TestVersion))
+ t.Run("all_db_schemaattr", integrationTest("all_db_schemaattr", []string{}, pre17TestVersion))
}
func TestIntegrationWithConnectionPool(t *testing.T) {
defer testutil.SetFeatureGateForTest(t, separateSchemaAttrGate, false)()
defer testutil.SetFeatureGateForTest(t, connectionPoolGate, true)()
- t.Run("single_db_connpool", integrationTest("single_db_connpool", []string{"otel"}))
- t.Run("multi_db_connpool", integrationTest("multi_db_connpool", []string{"otel", "otel2"}))
- t.Run("all_db_connpool", integrationTest("all_db_connpool", []string{}))
+ t.Run("single_db_connpool", integrationTest("single_db_connpool", []string{"otel"}, pre17TestVersion))
+ t.Run("multi_db_connpool", integrationTest("multi_db_connpool", []string{"otel", "otel2"}, pre17TestVersion))
+ t.Run("all_db_connpool", integrationTest("all_db_connpool", []string{}, pre17TestVersion))
}
-func integrationTest(name string, databases []string) func(*testing.T) {
+func integrationTest(name string, databases []string, pgVersion string) func(*testing.T) {
expectedFile := filepath.Join("testdata", "integration", "expected_"+name+".yaml")
return scraperinttest.NewIntegrationTest(
NewFactory(),
scraperinttest.WithContainerRequest(
testcontainers.ContainerRequest{
- Image: "postgres:13.18",
+ Image: fmt.Sprintf("postgres:%s", pgVersion),
Env: map[string]string{
"POSTGRES_USER": "root",
"POSTGRES_PASSWORD": "otel",
diff --git a/receiver/postgresqlreceiver/scraper.go b/receiver/postgresqlreceiver/scraper.go
index 9cc13f638c44..871153108d8f 100644
--- a/receiver/postgresqlreceiver/scraper.go
+++ b/receiver/postgresqlreceiver/scraper.go
@@ -304,9 +304,13 @@ func (p *postgreSQLScraper) collectBGWriterStats(
p.mb.RecordPostgresqlBgwriterBuffersAllocatedDataPoint(now, bgStats.buffersAllocated)
p.mb.RecordPostgresqlBgwriterBuffersWritesDataPoint(now, bgStats.bgWrites, metadata.AttributeBgBufferSourceBgwriter)
- p.mb.RecordPostgresqlBgwriterBuffersWritesDataPoint(now, bgStats.bufferBackendWrites, metadata.AttributeBgBufferSourceBackend)
+ // Negative values signal the column is absent (removed in PostgreSQL 17).
+ if bgStats.bufferBackendWrites >= 0 {
+ p.mb.RecordPostgresqlBgwriterBuffersWritesDataPoint(now, bgStats.bufferBackendWrites, metadata.AttributeBgBufferSourceBackend)
+ }
p.mb.RecordPostgresqlBgwriterBuffersWritesDataPoint(now, bgStats.bufferCheckpoints, metadata.AttributeBgBufferSourceCheckpoints)
- p.mb.RecordPostgresqlBgwriterBuffersWritesDataPoint(now, bgStats.bufferFsyncWrites, metadata.AttributeBgBufferSourceBackendFsync)
+ if bgStats.bufferFsyncWrites >= 0 {
+ p.mb.RecordPostgresqlBgwriterBuffersWritesDataPoint(now, bgStats.bufferFsyncWrites, metadata.AttributeBgBufferSourceBackendFsync)
+ }
p.mb.RecordPostgresqlBgwriterCheckpointCountDataPoint(now, bgStats.checkpointsReq, metadata.AttributeBgCheckpointTypeRequested)
p.mb.RecordPostgresqlBgwriterCheckpointCountDataPoint(now, bgStats.checkpointsScheduled, metadata.AttributeBgCheckpointTypeScheduled)
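The >= 0 guards encode a sentinel convention: PostgreSQL 17 removed buffers_backend and buffers_backend_fsync from pg_stat_bgwriter, so on newer servers the client reports a negative value and the scraper suppresses the data point rather than emitting a misleading zero. A self-contained sketch of the pattern (type and function names are illustrative, not the receiver's own):

```go
package main

import "fmt"

// bgWriterStats mirrors the two guarded fields; illustrative only.
type bgWriterStats struct {
	bufferBackendWrites int64 // -1 when the column is absent (PostgreSQL 17+)
	bufferFsyncWrites   int64 // -1 when the column is absent (PostgreSQL 17+)
}

// recordBufferWrites records only the values whose columns actually existed.
func recordBufferWrites(stats bgWriterStats, record func(source string, v int64)) {
	if stats.bufferBackendWrites >= 0 {
		record("backend", stats.bufferBackendWrites)
	}
	if stats.bufferFsyncWrites >= 0 {
		record("backend_fsync", stats.bufferFsyncWrites)
	}
}

func main() {
	// On a PostgreSQL 17 server both sentinels are negative: nothing is recorded.
	recordBufferWrites(bgWriterStats{bufferBackendWrites: -1, bufferFsyncWrites: -1},
		func(source string, v int64) { fmt.Println(source, v) })
}
```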
diff --git a/receiver/postgresqlreceiver/scraper_test.go b/receiver/postgresqlreceiver/scraper_test.go
index 86457712eb21..15165c2a01f4 100644
--- a/receiver/postgresqlreceiver/scraper_test.go
+++ b/receiver/postgresqlreceiver/scraper_test.go
@@ -452,6 +452,11 @@ func (m *mockClient) listDatabases(_ context.Context) ([]string, error) {
return args.Get(0).([]string), args.Error(1)
}
+func (m *mockClient) getVersion(_ context.Context) (string, error) {
+ args := m.Called()
+ return args.String(0), args.Error(1)
+}
+
func (m *mockClientFactory) getClient(database string) (client, error) {
args := m.Called(database)
return args.Get(0).(client), args.Error(1)
@@ -511,7 +516,6 @@ func (m *mockClient) initMocks(database string, schema string, databases []strin
checkpointWriteTime: 3.12,
checkpointSyncTime: 4.23,
bgWrites: 5,
- backendWrites: 6,
bufferBackendWrites: 7,
bufferFsyncWrites: 8,
bufferCheckpoints: 9,
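Because the client interface gained getVersion, the testify mock needs the matching stub added above; a test can then pin the reported version per scenario. A minimal sketch, assuming the usual mock setup used in this file:

```go
// Illustrative stubbing of the new method on the existing mock:
m := &mockClient{}
m.On("getVersion").Return("17.2", nil) // pretend the server is PostgreSQL 17
```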
diff --git a/receiver/postgresqlreceiver/testdata/integration/expected_single_db_post17.yaml b/receiver/postgresqlreceiver/testdata/integration/expected_single_db_post17.yaml
new file mode 100644
index 000000000000..85f81fc5f0e3
--- /dev/null
+++ b/receiver/postgresqlreceiver/testdata/integration/expected_single_db_post17.yaml
@@ -0,0 +1,703 @@
+resourceMetrics:
+ - resource: {}
+ scopeMetrics:
+ - metrics:
+ - description: Number of buffers allocated.
+ name: postgresql.bgwriter.buffers.allocated
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "289"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{buffers}'
+ - description: Number of buffers written.
+ name: postgresql.bgwriter.buffers.writes
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ attributes:
+ - key: source
+ value:
+ stringValue: bgwriter
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "60"
+ attributes:
+ - key: source
+ value:
+ stringValue: checkpoints
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{buffers}'
+ - description: The number of checkpoints performed.
+ name: postgresql.bgwriter.checkpoint.count
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "4"
+ attributes:
+ - key: type
+ value:
+ stringValue: requested
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "0"
+ attributes:
+ - key: type
+ value:
+ stringValue: scheduled
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{checkpoints}'
+ - description: Total time spent writing and syncing files to disk by checkpoints.
+ name: postgresql.bgwriter.duration
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asDouble: 14
+ attributes:
+ - key: type
+ value:
+ stringValue: sync
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asDouble: 7
+ attributes:
+ - key: type
+ value:
+ stringValue: write
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: ms
+ - description: Number of times the background writer stopped a cleaning scan because it had written too many buffers.
+ name: postgresql.bgwriter.maxwritten
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: "1"
+ - description: Configured maximum number of client connections allowed
+ gauge:
+ dataPoints:
+ - asInt: "100"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ name: postgresql.connection.max
+ unit: '{connections}'
+ - description: Number of user databases.
+ name: postgresql.database.count
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "1"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ unit: '{databases}'
+ - description: The number of database locks.
+ gauge:
+ dataPoints:
+ - asInt: "1"
+ attributes:
+ - key: lock_type
+ value:
+ stringValue: relation
+ - key: mode
+ value:
+ stringValue: AccessShareLock
+ - key: relation
+ value:
+ stringValue: pg_class
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "1"
+ attributes:
+ - key: lock_type
+ value:
+ stringValue: relation
+ - key: mode
+ value:
+ stringValue: AccessShareLock
+ - key: relation
+ value:
+ stringValue: pg_class_oid_index
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "1"
+ attributes:
+ - key: lock_type
+ value:
+ stringValue: relation
+ - key: mode
+ value:
+ stringValue: AccessShareLock
+ - key: relation
+ value:
+ stringValue: pg_class_relname_nsp_index
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "1"
+ attributes:
+ - key: lock_type
+ value:
+ stringValue: relation
+ - key: mode
+ value:
+ stringValue: AccessShareLock
+ - key: relation
+ value:
+ stringValue: pg_class_tblspc_relfilenode_index
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "1"
+ attributes:
+ - key: lock_type
+ value:
+ stringValue: relation
+ - key: mode
+ value:
+ stringValue: AccessShareLock
+ - key: relation
+ value:
+ stringValue: pg_locks
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ name: postgresql.database.locks
+ unit: '{lock}'
+ scope:
+ name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver
+ version: latest
+ - resource:
+ attributes:
+ - key: postgresql.database.name
+ value:
+ stringValue: otel
+ scopeMetrics:
+ - metrics:
+ - description: Number of times disk blocks were found already in the buffer cache.
+ name: postgresql.blks_hit
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_hit}'
+ - description: Number of disk blocks read in this database.
+ name: postgresql.blks_read
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{blks_read}'
+ - description: The number of commits.
+ name: postgresql.commits
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "6"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: "1"
+ - description: The database disk usage.
+ name: postgresql.db_size
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "7184900"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ unit: By
+ - description: The number of deadlocks.
+ name: postgresql.deadlocks
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{deadlock}'
+ - description: The number of rollbacks.
+ name: postgresql.rollbacks
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: "1"
+ - description: Number of user tables in a database.
+ name: postgresql.table.count
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "2"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ unit: '{table}'
+ - description: The number of temp files.
+ name: postgresql.temp_files
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{temp_file}'
+ - description: Number of rows deleted by queries in the database.
+ name: postgresql.tup_deleted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_deleted}'
+ - description: Number of rows fetched by queries in the database.
+ name: postgresql.tup_fetched
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_fetched}'
+ - description: Number of rows inserted by queries in the database.
+ name: postgresql.tup_inserted
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_inserted}'
+ - description: Number of rows returned by queries in the database.
+ name: postgresql.tup_returned
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_returned}'
+ - description: Number of rows updated by queries in the database.
+ name: postgresql.tup_updated
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{tup_updated}'
+ scope:
+ name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver
+ version: latest
+ - resource:
+ attributes:
+ - key: postgresql.database.name
+ value:
+ stringValue: otel
+ - key: postgresql.table.name
+ value:
+ stringValue: public.table1
+ scopeMetrics:
+ - metrics:
+ - description: The number of blocks read.
+ name: postgresql.blocks_read
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ attributes:
+ - key: source
+ value:
+ stringValue: heap_hit
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "0"
+ attributes:
+ - key: source
+ value:
+ stringValue: heap_read
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "0"
+ attributes:
+ - key: source
+ value:
+ stringValue: idx_hit
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "0"
+ attributes:
+ - key: source
+ value:
+ stringValue: idx_read
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "0"
+ attributes:
+ - key: source
+ value:
+ stringValue: tidx_hit
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "0"
+ attributes:
+ - key: source
+ value:
+ stringValue: tidx_read
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "0"
+ attributes:
+ - key: source
+ value:
+ stringValue: toast_hit
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "0"
+ attributes:
+ - key: source
+ value:
+ stringValue: toast_read
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: "1"
+ - description: The number of db row operations.
+ name: postgresql.operations
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ attributes:
+ - key: operation
+ value:
+ stringValue: del
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "0"
+ attributes:
+ - key: operation
+ value:
+ stringValue: hot_upd
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "0"
+ attributes:
+ - key: operation
+ value:
+ stringValue: ins
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "0"
+ attributes:
+ - key: operation
+ value:
+ stringValue: upd
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: "1"
+ - description: The number of rows in the database.
+ name: postgresql.rows
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ attributes:
+ - key: state
+ value:
+ stringValue: dead
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "0"
+ attributes:
+ - key: state
+ value:
+ stringValue: live
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ unit: "1"
+ - description: The number of sequential scans.
+ name: postgresql.sequential_scans
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "1"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{sequential_scan}'
+ - description: Disk space used by a table.
+ name: postgresql.table.size
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ unit: By
+ - description: Number of times a table has manually been vacuumed.
+ name: postgresql.table.vacuum.count
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{vacuums}'
+ scope:
+ name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver
+ version: latest
+ - resource:
+ attributes:
+ - key: postgresql.database.name
+ value:
+ stringValue: otel
+ - key: postgresql.table.name
+ value:
+ stringValue: public.table2
+ scopeMetrics:
+ - metrics:
+ - description: The number of blocks read.
+ name: postgresql.blocks_read
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ attributes:
+ - key: source
+ value:
+ stringValue: heap_hit
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "0"
+ attributes:
+ - key: source
+ value:
+ stringValue: heap_read
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "0"
+ attributes:
+ - key: source
+ value:
+ stringValue: idx_hit
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "0"
+ attributes:
+ - key: source
+ value:
+ stringValue: idx_read
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "0"
+ attributes:
+ - key: source
+ value:
+ stringValue: tidx_hit
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "0"
+ attributes:
+ - key: source
+ value:
+ stringValue: tidx_read
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "0"
+ attributes:
+ - key: source
+ value:
+ stringValue: toast_hit
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "0"
+ attributes:
+ - key: source
+ value:
+ stringValue: toast_read
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: "1"
+ - description: The number of db row operations.
+ name: postgresql.operations
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ attributes:
+ - key: operation
+ value:
+ stringValue: del
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "0"
+ attributes:
+ - key: operation
+ value:
+ stringValue: hot_upd
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "0"
+ attributes:
+ - key: operation
+ value:
+ stringValue: ins
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "0"
+ attributes:
+ - key: operation
+ value:
+ stringValue: upd
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: "1"
+ - description: The number of rows in the database.
+ name: postgresql.rows
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ attributes:
+ - key: state
+ value:
+ stringValue: dead
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "0"
+ attributes:
+ - key: state
+ value:
+ stringValue: live
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ unit: "1"
+ - description: The number of sequential scans.
+ name: postgresql.sequential_scans
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "1"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{sequential_scan}'
+ - description: Disk space used by a table.
+ name: postgresql.table.size
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ unit: By
+ - description: Number of times a table has manually been vacuumed.
+ name: postgresql.table.vacuum.count
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{vacuums}'
+ scope:
+ name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver
+ version: latest
+ - resource:
+ attributes:
+ - key: postgresql.database.name
+ value:
+ stringValue: otel
+ - key: postgresql.index.name
+ value:
+ stringValue: table1_pkey
+ - key: postgresql.table.name
+ value:
+ stringValue: table1
+ scopeMetrics:
+ - metrics:
+ - description: The number of index scans on a table.
+ name: postgresql.index.scans
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{scans}'
+ - description: The size of the index on disk.
+ gauge:
+ dataPoints:
+ - asInt: "8192"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ name: postgresql.index.size
+ unit: By
+ scope:
+ name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver
+ version: latest
+ - resource:
+ attributes:
+ - key: postgresql.database.name
+ value:
+ stringValue: otel
+ - key: postgresql.index.name
+ value:
+ stringValue: table2_pkey
+ - key: postgresql.table.name
+ value:
+ stringValue: table2
+ scopeMetrics:
+ - metrics:
+ - description: The number of index scans on a table.
+ name: postgresql.index.scans
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "0"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ isMonotonic: true
+ unit: '{scans}'
+ - description: The size of the index on disk.
+ gauge:
+ dataPoints:
+ - asInt: "8192"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ name: postgresql.index.size
+ unit: By
+ scope:
+ name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver
+ version: latest
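The post-17 golden file above differs from its pre-17 counterparts chiefly in postgresql.bgwriter.buffers.writes: only the bgwriter and checkpoints sources appear, since the backend and backend_fsync data points are suppressed on PostgreSQL 17. An illustrative summary of that expectation, assuming the pre-17 fixtures carry all four sources:

```go
// Buffer-write sources expected per tested major version; illustrative only.
var expectedWriteSources = map[string][]string{
	pre17TestVersion:  {"backend", "backend_fsync", "bgwriter", "checkpoints"},
	post17TestVersion: {"bgwriter", "checkpoints"},
}
```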