[exporter/elasticsearch] Add explicit bounds histogram support to metrics #34045

Merged · 13 commits · Jul 22, 2024
27 changes: 27 additions & 0 deletions .chloggen/elasticsearchexporter_metrics-histogram-support.yaml
@@ -0,0 +1,27 @@
+# Use this changelog template to create an entry for release notes.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: enhancement
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: elasticsearchexporter
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Add histogram support to metrics
+
+# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
+issues: [34045]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext:
+
+# If your change doesn't affect end users or the exported elements of any package,
+# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]'
+change_logs: []
3 changes: 1 addition & 2 deletions exporter/elasticsearchexporter/data_stream_router.go
@@ -8,7 +8,6 @@ import (
 
 	"go.opentelemetry.io/collector/pdata/pcommon"
 	"go.opentelemetry.io/collector/pdata/plog"
-	"go.opentelemetry.io/collector/pdata/pmetric"
 	"go.opentelemetry.io/collector/pdata/ptrace"
 )
@@ -60,7 +59,7 @@ func routeLogRecord(
 // routeDataPoint returns the name of the index to send the data point to according to data stream routing attributes.
 // This function may mutate record attributes.
 func routeDataPoint(
-	dataPoint pmetric.NumberDataPoint,
+	dataPoint dataPoint,
 	scope pcommon.InstrumentationScope,
 	resource pcommon.Resource,
 	fIndex string,
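For background on how these routed index names look in practice (a sketch under assumptions, not this file's actual code): the index is composed from the data stream type, dataset, and namespace, which is why the tests later in this PR expect documents in metrics-generic-default. The helper below is hypothetical:

```go
package main

import "fmt"

// dataStreamIndex is a hypothetical helper showing the naming scheme only;
// the exporter's real routing also reads data stream attributes from the
// record, scope, and resource, and may mutate them.
func dataStreamIndex(typ, dataset, namespace string) string {
	if dataset == "" {
		dataset = "generic"
	}
	if namespace == "" {
		namespace = "default"
	}
	return fmt.Sprintf("%s-%s-%s", typ, dataset, namespace)
}

func main() {
	fmt.Println(dataStreamIndex("metrics", "", "")) // metrics-generic-default
}
```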
71 changes: 54 additions & 17 deletions exporter/elasticsearchexporter/exporter.go
@@ -157,27 +157,64 @@ func (e *elasticsearchExporter) pushMetricsData(
 		for k := 0; k < scopeMetrics.Metrics().Len(); k++ {
 			metric := scopeMetrics.Metrics().At(k)
 
-			// We only support Sum and Gauge metrics at the moment.
-			var dataPoints pmetric.NumberDataPointSlice
-			switch metric.Type() {
-			case pmetric.MetricTypeSum:
-				dataPoints = metric.Sum().DataPoints()
-			case pmetric.MetricTypeGauge:
-				dataPoints = metric.Gauge().DataPoints()
-			}
-
-			for l := 0; l < dataPoints.Len(); l++ {
-				dataPoint := dataPoints.At(l)
-				fIndex, err := e.getMetricDataPointIndex(resource, scope, dataPoint)
+			upsertDataPoint := func(dp dataPoint, dpValue pcommon.Value) error {
+				fIndex, err := e.getMetricDataPointIndex(resource, scope, dp)
 				if err != nil {
-					errs = append(errs, err)
-					continue
+					return err
 				}
 				if _, ok := resourceDocs[fIndex]; !ok {
 					resourceDocs[fIndex] = make(map[uint32]objmodel.Document)
 				}
-				if err := e.model.upsertMetricDataPoint(resourceDocs[fIndex], resource, scope, metric, dataPoint); err != nil {
-					errs = append(errs, err)
+
+				if err = e.model.upsertMetricDataPointValue(resourceDocs[fIndex], resource, scope, metric, dp, dpValue); err != nil {
+					return err
 				}
+				return nil
 			}
+
+			// TODO: support exponential histogram
+			switch metric.Type() {
+			case pmetric.MetricTypeSum:
+				dps := metric.Sum().DataPoints()
+				for l := 0; l < dps.Len(); l++ {
+					dp := dps.At(l)
+					val, err := numberToValue(dp)
+					if err != nil {
+						errs = append(errs, err)
+						continue
+					}
+					if err := upsertDataPoint(dp, val); err != nil {
+						errs = append(errs, err)
+						continue
+					}
+				}
+			case pmetric.MetricTypeGauge:
+				dps := metric.Gauge().DataPoints()
+				for l := 0; l < dps.Len(); l++ {
+					dp := dps.At(l)
+					val, err := numberToValue(dp)
+					if err != nil {
+						errs = append(errs, err)
+						continue
+					}
+					if err := upsertDataPoint(dp, val); err != nil {
+						errs = append(errs, err)
+						continue
+					}
+				}
+			case pmetric.MetricTypeHistogram:
+				dps := metric.Histogram().DataPoints()
+				for l := 0; l < dps.Len(); l++ {
+					dp := dps.At(l)
+					val, err := histogramToValue(dp)
+					if err != nil {
+						errs = append(errs, err)
+						continue
+					}
+					if err := upsertDataPoint(dp, val); err != nil {
+						errs = append(errs, err)
+						continue
+					}
+				}
+			}
 		}
@@ -211,7 +248,7 @@ func (e *elasticsearchExporter) pushMetricsData(
 func (e *elasticsearchExporter) getMetricDataPointIndex(
 	resource pcommon.Resource,
 	scope pcommon.InstrumentationScope,
-	dataPoint pmetric.NumberDataPoint,
+	dataPoint dataPoint,
 ) (string, error) {
 	fIndex := e.index
 	if e.dynamicIndex {
99 changes: 99 additions & 0 deletions exporter/elasticsearchexporter/exporter_test.go
@@ -8,6 +8,7 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
+	"math"
 	"net/http"
 	"runtime"
 	"sync"
@@ -495,6 +496,7 @@ func TestExporterMetrics(t *testing.T) {
 		},
 	)
 	metrics.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).SetName("my.metric")
+	metrics.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).SetEmptySum().DataPoints().AppendEmpty().SetIntValue(0)
 	mustSendMetrics(t, exporter, metrics)
 
 	rec.WaitItems(1)
@@ -633,6 +635,103 @@ func TestExporterMetrics(t *testing.T) {
 
 		assertItemsEqual(t, expected, rec.Items(), false)
 	})
+
+	t.Run("publish histogram", func(t *testing.T) {
+		rec := newBulkRecorder()
+		server := newESTestServer(t, func(docs []itemRequest) ([]itemResponse, error) {
+			rec.Record(docs)
+			return itemsAllOK(docs)
+		})
+
+		exporter := newTestMetricsExporter(t, server.URL, func(cfg *Config) {
+			cfg.Mapping.Mode = "ecs"
+		})
+
+		metrics := pmetric.NewMetrics()
+		resourceMetrics := metrics.ResourceMetrics().AppendEmpty()
+		scopeA := resourceMetrics.ScopeMetrics().AppendEmpty()
+		metricSlice := scopeA.Metrics()
+		fooMetric := metricSlice.AppendEmpty()
+		fooMetric.SetName("metric.foo")
+		fooDps := fooMetric.SetEmptyHistogram().DataPoints()
+		fooDp := fooDps.AppendEmpty()
+		fooDp.ExplicitBounds().FromRaw([]float64{1.0, 2.0, 3.0})
+		fooDp.BucketCounts().FromRaw([]uint64{1, 2, 3, 4})
+		fooOtherDp := fooDps.AppendEmpty()
+		fooOtherDp.SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(3600, 0)))
+		fooOtherDp.ExplicitBounds().FromRaw([]float64{4.0, 5.0, 6.0})
+		fooOtherDp.BucketCounts().FromRaw([]uint64{4, 5, 6, 7})
+
+		mustSendMetrics(t, exporter, metrics)
+
+		rec.WaitItems(2)
+
+		expected := []itemRequest{
+			{
+				Action: []byte(`{"create":{"_index":"metrics-generic-default"}}`),
+				Document: []byte(`{"@timestamp":"1970-01-01T00:00:00.000000000Z","data_stream":{"dataset":"generic","namespace":"default","type":"metrics"},"metric":{"foo":{"counts":[1,2,3,4],"values":[0.5,1.5,2.5,3]}}}`),
+			},
+			{
+				Action: []byte(`{"create":{"_index":"metrics-generic-default"}}`),
+				Document: []byte(`{"@timestamp":"1970-01-01T01:00:00.000000000Z","data_stream":{"dataset":"generic","namespace":"default","type":"metrics"},"metric":{"foo":{"counts":[4,5,6,7],"values":[2,4.5,5.5,6]}}}`),
+			},
+		}
+
+		assertItemsEqual(t, expected, rec.Items(), false)
+	})
+
+	t.Run("publish only valid data points", func(t *testing.T) {
+		rec := newBulkRecorder()
+		server := newESTestServer(t, func(docs []itemRequest) ([]itemResponse, error) {
+			rec.Record(docs)
+			return itemsAllOK(docs)
+		})
+
+		exporter := newTestMetricsExporter(t, server.URL, func(cfg *Config) {
+			cfg.Mapping.Mode = "ecs"
+		})
+
+		metrics := pmetric.NewMetrics()
+		resourceMetrics := metrics.ResourceMetrics().AppendEmpty()
+		scopeA := resourceMetrics.ScopeMetrics().AppendEmpty()
+		metricSlice := scopeA.Metrics()
+		fooMetric := metricSlice.AppendEmpty()
+		fooMetric.SetName("metric.foo")
+		fooDps := fooMetric.SetEmptyHistogram().DataPoints()
+		fooDp := fooDps.AppendEmpty()
+		fooDp.ExplicitBounds().FromRaw([]float64{1.0, 2.0, 3.0})
+		fooDp.BucketCounts().FromRaw([]uint64{})
+		fooOtherDp := fooDps.AppendEmpty()
+		fooOtherDp.SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(3600, 0)))
+		fooOtherDp.ExplicitBounds().FromRaw([]float64{4.0, 5.0, 6.0})
+		fooOtherDp.BucketCounts().FromRaw([]uint64{4, 5, 6, 7})
+		barMetric := metricSlice.AppendEmpty()
+		barMetric.SetName("metric.bar")
+		barDps := barMetric.SetEmptySum().DataPoints()
+		barDp := barDps.AppendEmpty()
+		barDp.SetDoubleValue(math.Inf(1))
+		barOtherDp := barDps.AppendEmpty()
+		barOtherDp.SetDoubleValue(1.0)
+
+		err := exporter.ConsumeMetrics(context.Background(), metrics)
+		require.ErrorContains(t, err, "invalid histogram data point")
+		require.ErrorContains(t, err, "invalid number data point")
Comment on lines +716 to +718:

Contributor (author): [to reviewer] ConsumeMetrics will return an error; I don't think we can return a partial error here. The valid metric data points will be published, and the invalid ones will be dropped.

Member: I'm wondering if you can elaborate on this. Is there a reason why you cannot use a PartialSuccess and indicate the number of points rejected?

Member: It looks like it handles errors the same way for histograms as it does for other metric types, so this is probably irrelevant.

Contributor (author): pushMetrics returns an error, but PartialSuccess is not an error. I am not aware of a way to meaningfully indicate a partial success to the caller in an exporter; even otelarrowexporter returns a nil error when a partial success happens.

The handling in this PR takes a different route: if any data points are dropped, it returns an explicit error. We may revisit in the future whether this is desirable.
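To make the error behavior above concrete, here is a minimal runnable sketch. It assumes the exporter flattens its collected errs slice with errors.Join (an assumption for illustration; only the errs slice itself appears in this diff), which is why a single ConsumeMetrics error can satisfy both ErrorContains assertions:

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	// Hypothetical per-data-point failures, mirroring the two invalid
	// points constructed in the test above.
	errs := []error{
		errors.New("invalid histogram data point"),
		errors.New("invalid number data point"),
	}

	// errors.Join produces a single error whose message contains every
	// sub-message, so a substring assertion on either failure passes.
	err := errors.Join(errs...)
	fmt.Println(err)
	// Output:
	// invalid histogram data point
	// invalid number data point
}
```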


+		rec.WaitItems(2)
+
+		expected := []itemRequest{
+			{
+				Action: []byte(`{"create":{"_index":"metrics-generic-default"}}`),
+				Document: []byte(`{"@timestamp":"1970-01-01T00:00:00.000000000Z","data_stream":{"dataset":"generic","namespace":"default","type":"metrics"},"metric":{"bar":1}}`),
+			},
+			{
+				Action: []byte(`{"create":{"_index":"metrics-generic-default"}}`),
+				Document: []byte(`{"@timestamp":"1970-01-01T01:00:00.000000000Z","data_stream":{"dataset":"generic","namespace":"default","type":"metrics"},"metric":{"foo":{"counts":[4,5,6,7],"values":[2,4.5,5.5,6]}}}`),
+			},
+		}
+
+		assertItemsEqual(t, expected, rec.Items(), false)
+	})
 }
 
 func TestExporterTraces(t *testing.T) {
79 changes: 72 additions & 7 deletions exporter/elasticsearchexporter/model.go
@@ -7,6 +7,7 @@ import (
 	"bytes"
 	"encoding/binary"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"hash"
 	"hash/fnv"
@@ -65,7 +66,7 @@ var resourceAttrsToPreserve = map[string]bool{
 type mappingModel interface {
 	encodeLog(pcommon.Resource, plog.LogRecord, pcommon.InstrumentationScope) ([]byte, error)
 	encodeSpan(pcommon.Resource, ptrace.Span, pcommon.InstrumentationScope) ([]byte, error)
-	upsertMetricDataPoint(map[uint32]objmodel.Document, pcommon.Resource, pcommon.InstrumentationScope, pmetric.Metric, pmetric.NumberDataPoint) error
+	upsertMetricDataPointValue(map[uint32]objmodel.Document, pcommon.Resource, pcommon.InstrumentationScope, pmetric.Metric, dataPoint, pcommon.Value) error
 	encodeDocument(objmodel.Document) ([]byte, error)
 }
@@ -81,6 +82,11 @@ type encodeModel struct {
 	mode MappingMode
 }
 
+type dataPoint interface {
+	Timestamp() pcommon.Timestamp
+	Attributes() pcommon.Map
+}
+
 const (
 	traceIDField = "traceID"
 	spanIDField = "spanID"
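An aside on the new dataPoint interface (not part of the diff): pmetric.NumberDataPoint and pmetric.HistogramDataPoint both expose Timestamp() and Attributes(), so each satisfies it. A self-contained sketch, with the unexported interface re-declared locally so it compiles on its own:

```go
package main

import (
	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

// dataPoint is a local copy of the exporter's unexported interface,
// redeclared here only so the assertions below compile standalone.
type dataPoint interface {
	Timestamp() pcommon.Timestamp
	Attributes() pcommon.Map
}

// Compile-time assertions that both pdata types satisfy the interface.
var (
	_ dataPoint = pmetric.NumberDataPoint{}
	_ dataPoint = pmetric.HistogramDataPoint{}
)

func main() {}
```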
@@ -185,7 +191,7 @@ func (m *encodeModel) encodeDocument(document objmodel.Document) ([]byte, error)
 	return buf.Bytes(), nil
 }
 
-func (m *encodeModel) upsertMetricDataPoint(documents map[uint32]objmodel.Document, resource pcommon.Resource, _ pcommon.InstrumentationScope, metric pmetric.Metric, dp pmetric.NumberDataPoint) error {
+func (m *encodeModel) upsertMetricDataPointValue(documents map[uint32]objmodel.Document, resource pcommon.Resource, _ pcommon.InstrumentationScope, metric pmetric.Metric, dp dataPoint, value pcommon.Value) error {
 	hash := metricHash(dp.Timestamp(), dp.Attributes())
 	var (
 		document objmodel.Document
@@ -197,15 +203,74 @@ func (m *encodeModel) upsertMetricDataPoint(documents map[uint32]objmodel.Docume
 		document.AddAttributes("", dp.Attributes())
 	}
 
+	document.AddAttribute(metric.Name(), value)
+
+	documents[hash] = document
+	return nil
+}
+
+func histogramToValue(dp pmetric.HistogramDataPoint) (pcommon.Value, error) {
+	// Histogram conversion function is from
+	// https://github.com/elastic/apm-data/blob/3b28495c3cbdc0902983134276eb114231730249/input/otlp/metrics.go#L277
+	bucketCounts := dp.BucketCounts()
+	explicitBounds := dp.ExplicitBounds()
+	if bucketCounts.Len() != explicitBounds.Len()+1 || explicitBounds.Len() == 0 {
+		return pcommon.Value{}, errors.New("invalid histogram data point")
+	}
+
+	vm := pcommon.NewValueMap()
+	m := vm.Map()
+	counts := m.PutEmptySlice("counts")
+	values := m.PutEmptySlice("values")
+
+	values.EnsureCapacity(bucketCounts.Len())
+	counts.EnsureCapacity(bucketCounts.Len())
+	for i := 0; i < bucketCounts.Len(); i++ {
+		count := bucketCounts.At(i)
+		if count == 0 {
+			continue
+		}
+
+		var value float64
+		switch i {
+		// (-infinity, explicit_bounds[i]]
+		case 0:
+			value = explicitBounds.At(i)
+			if value > 0 {
+				value /= 2
+			}
+
+		// (explicit_bounds[i], +infinity)
+		case bucketCounts.Len() - 1:
+			value = explicitBounds.At(i - 1)
+
+		// [explicit_bounds[i-1], explicit_bounds[i])
+		default:
+			// Use the midpoint between the boundaries.
+			value = explicitBounds.At(i-1) + (explicitBounds.At(i)-explicitBounds.At(i-1))/2.0
+		}
+
+		counts.AppendEmpty().SetInt(int64(count))
+		values.AppendEmpty().SetDouble(value)
+	}
+
+	return vm, nil
+}
+
+var errInvalidNumberDataPoint = errors.New("invalid number data point")
+
+func numberToValue(dp pmetric.NumberDataPoint) (pcommon.Value, error) {
 	switch dp.ValueType() {
 	case pmetric.NumberDataPointValueTypeDouble:
-		document.AddAttribute(metric.Name(), pcommon.NewValueDouble(dp.DoubleValue()))
+		value := dp.DoubleValue()
+		if math.IsNaN(value) || math.IsInf(value, 0) {
+			return pcommon.Value{}, errInvalidNumberDataPoint
+		}
+		return pcommon.NewValueDouble(value), nil
 	case pmetric.NumberDataPointValueTypeInt:
-		document.AddAttribute(metric.Name(), pcommon.NewValueInt(dp.IntValue()))
+		return pcommon.NewValueInt(dp.IntValue()), nil
 	}
-
-	documents[hash] = document
-	return nil
+	return pcommon.Value{}, errInvalidNumberDataPoint
 }
 
 func (m *encodeModel) encodeSpan(resource pcommon.Resource, span ptrace.Span, scope pcommon.InstrumentationScope) ([]byte, error) {
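To make the bucket-centroid rule concrete: for explicit bounds [1, 2, 3] and bucket counts [1, 2, 3, 4] (the first fixture in the tests above), histogramToValue produces counts [1, 2, 3, 4] and values [0.5, 1.5, 2.5, 3]. Below is a standalone sketch of the same computation over plain slices; it is an illustration, not the exporter's code:

```go
package main

import "fmt"

// centroids mirrors histogramToValue's bucket-value rule: the first
// bucket (-inf, b0] maps to b0/2 when b0 > 0, the last bucket
// (bN-1, +inf) maps to bN-1, interior buckets map to the midpoint of
// their two boundaries, and zero-count buckets are skipped.
func centroids(bounds []float64, counts []uint64) ([]int64, []float64) {
	var cs []int64
	var vs []float64
	for i, count := range counts {
		if count == 0 {
			continue
		}
		var v float64
		switch i {
		case 0:
			v = bounds[0]
			if v > 0 {
				v /= 2
			}
		case len(counts) - 1:
			v = bounds[len(bounds)-1]
		default:
			v = bounds[i-1] + (bounds[i]-bounds[i-1])/2
		}
		cs = append(cs, int64(count))
		vs = append(vs, v)
	}
	return cs, vs
}

func main() {
	cs, vs := centroids([]float64{1, 2, 3}, []uint64{1, 2, 3, 4})
	fmt.Println(cs, vs) // [1 2 3 4] [0.5 1.5 2.5 3]
}
```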
7 changes: 5 additions & 2 deletions exporter/elasticsearchexporter/model_test.go
@@ -97,11 +97,14 @@ func TestEncodeMetric(t *testing.T) {
 
 	var docsBytes [][]byte
 	for i := 0; i < metrics.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().Len(); i++ {
-		err := model.upsertMetricDataPoint(docs,
+		val, err := numberToValue(metrics.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(i))
+		require.NoError(t, err)
+		err = model.upsertMetricDataPointValue(docs,
 			metrics.ResourceMetrics().At(0).Resource(),
 			metrics.ResourceMetrics().At(0).ScopeMetrics().At(0).Scope(),
 			metrics.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0),
-			metrics.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(i))
+			metrics.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(i),
+			val)
 		require.NoError(t, err)
 	}
