# [chore] Spelling exporter (#37133)
#### Description

Fix spelling in exporter/

#37128 (review)

#### Link to tracking issue

* #37128

---------

Signed-off-by: Josh Soref <[email protected]>
Authored by jsoref on Jan 17, 2025
1 parent ae470e0 · commit c506f54
Showing 107 changed files with 274 additions and 283 deletions.
@@ -278,7 +278,7 @@ func TestAlertManagerTracesExporterNoErrors(t *testing.T) {

type (
MockServer struct {
-mockserver *httptest.Server // this means MockServer aggreagates 'httptest.Server', but can it's more like inheritance in C++
+mockserver *httptest.Server // this means MockServer aggregates 'httptest.Server', but can it's more like inheritance in C++
fooCalledSuccessfully bool // this is false by default
}
)
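As an aside, the comment being fixed here gestures at a real Go distinction: a named field is plain aggregation, while an *embedded* field promotes the inner type's methods, which is the closest Go analogue to C++ inheritance. A minimal sketch of the difference (hypothetical type names, not from this diff):

```go
package main

import (
	"net/http"
	"net/http/httptest"
)

// Aggregation: the server is an ordinary named field; callers must go
// through it explicitly, and none of its methods are promoted.
type WrappedServer struct {
	server *httptest.Server
}

// Embedding: the methods and fields of *httptest.Server are promoted,
// so EmbeddedServer "inherits" Close, URL, Client, and so on.
type EmbeddedServer struct {
	*httptest.Server
}

func main() {
	w := WrappedServer{server: httptest.NewServer(http.NotFoundHandler())}
	defer w.server.Close() // must reach through the field

	e := EmbeddedServer{httptest.NewServer(http.NotFoundHandler())}
	defer e.Close() // promoted method, called directly
}
```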
@@ -29,7 +29,7 @@ func createLogData(numberOfLogs int) plog.Logs {
logs := plog.NewLogs()
logs.ResourceLogs().AppendEmpty() // Add an empty ResourceLogs
rl := logs.ResourceLogs().AppendEmpty()
rl.Resource().Attributes().PutStr("resouceKey", "resourceValue")
rl.Resource().Attributes().PutStr("resourceKey", "resourceValue")
rl.Resource().Attributes().PutStr(conventions.AttributeServiceName, "test-log-service-exporter")
rl.Resource().Attributes().PutStr(conventions.AttributeHostName, "test-host")
sl := rl.ScopeLogs().AppendEmpty()
@@ -13,7 +13,7 @@ import (
"go.uber.org/zap"
)

-// newMetricsExporter return a new LogSerice metrics exporter.
+// newMetricsExporter return a new LogService metrics exporter.
func newMetricsExporter(set exporter.Settings, cfg component.Config) (exporter.Metrics, error) {
l := &logServiceMetricsSender{
logger: set.Logger,
@@ -10,7 +10,7 @@
},
{
"Key": "resource",
-"Value": "{\"resouceKey\":\"resourceValue\"}"
+"Value": "{\"resourceKey\":\"resourceValue\"}"
},
{
"Key": "otlp.name",
@@ -64,7 +64,7 @@
},
{
"Key": "resource",
-"Value": "{\"resouceKey\":\"resourceValue\"}"
+"Value": "{\"resourceKey\":\"resourceValue\"}"
},
{
"Key": "otlp.name",
@@ -118,7 +118,7 @@
},
{
"Key": "resource",
-"Value": "{\"resouceKey\":\"resourceValue\"}"
+"Value": "{\"resourceKey\":\"resourceValue\"}"
},
{
"Key": "otlp.name",
@@ -172,7 +172,7 @@
},
{
"Key": "resource",
-"Value": "{\"resouceKey\":\"resourceValue\"}"
+"Value": "{\"resourceKey\":\"resourceValue\"}"
},
{
"Key": "otlp.name",
@@ -226,7 +226,7 @@
},
{
"Key": "resource",
-"Value": "{\"resouceKey\":\"resourceValue\"}"
+"Value": "{\"resourceKey\":\"resourceValue\"}"
},
{
"Key": "otlp.name",
@@ -280,7 +280,7 @@
},
{
"Key": "resource",
-"Value": "{\"resouceKey\":\"resourceValue\"}"
+"Value": "{\"resourceKey\":\"resourceValue\"}"
},
{
"Key": "otlp.name",
@@ -334,7 +334,7 @@
},
{
"Key": "resource",
-"Value": "{\"resouceKey\":\"resourceValue\"}"
+"Value": "{\"resourceKey\":\"resourceValue\"}"
},
{
"Key": "otlp.name",
@@ -388,7 +388,7 @@
},
{
"Key": "resource",
-"Value": "{\"resouceKey\":\"resourceValue\"}"
+"Value": "{\"resourceKey\":\"resourceValue\"}"
},
{
"Key": "otlp.name",
@@ -442,7 +442,7 @@
},
{
"Key": "resource",
-"Value": "{\"resouceKey\":\"resourceValue\"}"
+"Value": "{\"resourceKey\":\"resourceValue\"}"
},
{
"Key": "otlp.name",
2 changes: 1 addition & 1 deletion exporter/alibabacloudlogserviceexporter/trace_exporter.go
@@ -13,7 +13,7 @@ import (
"go.uber.org/zap"
)

-// newTracesExporter return a new LogSerice trace exporter.
+// newTracesExporter return a new LogService trace exporter.
func newTracesExporter(set exporter.Settings, cfg component.Config) (exporter.Traces, error) {
l := &logServiceTraceSender{
logger: set.Logger,
2 changes: 1 addition & 1 deletion exporter/awscloudwatchlogsexporter/config.go
@@ -37,7 +37,7 @@ type Config struct {
// Possible values are 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 2192, 2557, 2922, 3288, or 3653
LogRetention int64 `mapstructure:"log_retention"`

-// Tags is the option to set tags for the CloudWatch Log Group. If specified, please add add at least 1 and at most 50 tags. Input is a string to string map like so: { 'key': 'value' }
+// Tags is the option to set tags for the CloudWatch Log Group. If specified, please add at least 1 and at most 50 tags. Input is a string to string map like so: { 'key': 'value' }
// Keys must be between 1-128 characters and follow the regex pattern: ^([\p{L}\p{Z}\p{N}_.:/=+\-@]+)$
// Values must be between 1-256 characters and follow the regex pattern: ^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$
Tags map[string]*string `mapstructure:"tags"`
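To make the two options in this hunk concrete, here is a hedged configuration sketch (the `log_group_name`/`log_stream_name` keys are this exporter's usual identifiers but should be checked against its README; all values are illustrative):

```yaml
exporters:
  awscloudwatchlogs:
    log_group_name: "my-log-group"
    log_stream_name: "my-log-stream"
    log_retention: 90     # must be one of the values listed in the comment above
    tags:                 # 1 to 50 entries; keys and values must match the regexes above
      team: "observability"
      env: "prod"
```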
4 changes: 2 additions & 2 deletions exporter/awsemfexporter/README.md
@@ -41,7 +41,7 @@ The following exporter configuration parameters are supported.
| `role_arn` | IAM role to upload segments to a different account. | |
| `max_retries` | Maximum number of retries before abandoning an attempt to post data. | 1 |
| `dimension_rollup_option` | DimensionRollupOption is the option for metrics dimension rollup. Three options are available: `NoDimensionRollup`, `SingleDimensionRollupOnly` and `ZeroAndSingleDimensionRollup`. The default value is `ZeroAndSingleDimensionRollup`. Enabling feature gate `awsemf.nodimrollupdefault` will set default to `NoDimensionRollup`. |"ZeroAndSingleDimensionRollup" (Enable both zero dimension rollup and single dimension rollup)|
-| `resource_to_telemetry_conversion` | "resource_to_telemetry_conversion" is the option for converting resource attributes to telemetry attributes. It has only one config onption- `enabled`. For metrics, if `enabled=true`, all the resource attributes will be converted to metric labels by default. See `Resource Attributes to Metric Labels` section below for examples. | `enabled=false` |
+| `resource_to_telemetry_conversion` | "resource_to_telemetry_conversion" is the option for converting resource attributes to telemetry attributes. It has only one config option- `enabled`. For metrics, if `enabled=true`, all the resource attributes will be converted to metric labels by default. See `Resource Attributes to Metric Labels` section below for examples. | `enabled=false` |
| `output_destination` | "output_destination" is an option to specify the EMFExporter output. Currently, two options are available. "cloudwatch" or "stdout" | `cloudwatch` |
| `detailed_metrics` | Retain detailed datapoint values in exported metrics (e.g instead of exporting a quantile as a statistical value, preserve the quantile's population) | `false` |
| `parse_json_encoded_attr_values` | List of attribute keys whose corresponding values are JSON-encoded strings and will be converted to JSON structures in emf logs. For example, the attribute string value "{\\"x\\":5,\\"y\\":6}" will be converted to a json object: ```{"x": 5, "y": 6}``` | [ ] |
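Pulling the parameters of this table together, a configuration using them might look roughly like this (a sketch built only from the option names documented above; values are illustrative):

```yaml
exporters:
  awsemf:
    dimension_rollup_option: ZeroAndSingleDimensionRollup  # the stated default
    resource_to_telemetry_conversion:
      enabled: true                  # convert resource attributes to metric labels
    output_destination: cloudwatch   # or "stdout"
    detailed_metrics: false
    parse_json_encoded_attr_values: ["kubernetes"]  # attribute keys holding JSON-encoded strings
```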
@@ -73,7 +73,7 @@ A metric descriptor section allows the schema of a metric to be overwritten befo
| Name | Description | Default |
| :---------------- | :--------------------------------------------------------------------- | ------- |
| `metric_name` | The name of the metric to be overwritten. | |
-| `unit` | The overwritten value of unit. The [MetricDatum](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_MetricDatum.html) contains a ful list of supported unit values. | |
+| `unit` | The overwritten value of unit. The [MetricDatum](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_MetricDatum.html) contains a full list of supported unit values. | |
| `overwrite` | `true` if the schema should be overwritten with the given specification, otherwise it will only be configured if empty. | false |


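A sketch of what such a descriptor section might look like, assuming the mapstructure key is `metric_descriptors` (matching the `MetricDescriptors` field visible in the config.go hunk below; the metric name is illustrative):

```yaml
exporters:
  awsemf:
    metric_descriptors:
      - metric_name: request_latency  # illustrative
        unit: Milliseconds            # must be a unit MetricDatum supports
        overwrite: true               # replace the existing unit even if already set
```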
4 changes: 2 additions & 2 deletions exporter/awsemfexporter/config.go
@@ -73,7 +73,7 @@ type Config struct {
// Note that at the moment in order to use this feature the value "kubernetes" must also be added to the ParseJSONEncodedAttributeValues array in order to be used
EKSFargateContainerInsightsEnabled bool `mapstructure:"eks_fargate_container_insights_enabled"`

-// ResourceToTelemetrySettings is an option for converting resource attrihutes to telemetry attributes.
+// ResourceToTelemetrySettings is an option for converting resource attributes to telemetry attributes.
// "Enabled" - A boolean field to enable/disable this option. Default is `false`.
// If enabled, all the resource attributes will be converted to metric labels by default.
ResourceToTelemetrySettings resourcetotelemetry.Settings `mapstructure:"resource_to_telemetry_conversion"`
@@ -124,7 +124,7 @@ func (config *Config) Validate() error {
if _, ok := eMFSupportedUnits[descriptor.Unit]; ok {
validDescriptors = append(validDescriptors, descriptor)
} else {
-config.logger.Warn("Dropped unsupported metric desctriptor.", zap.String("unit", descriptor.Unit))
+config.logger.Warn("Dropped unsupported metric descriptor.", zap.String("unit", descriptor.Unit))
}
}
config.MetricDescriptors = validDescriptors
2 changes: 1 addition & 1 deletion exporter/awsemfexporter/datapoint.go
@@ -567,7 +567,7 @@ func getDataPoints(pmd pmetric.Metric, metadata cWMetricMetadata, logger *zap.Lo
// For summaries coming from the prometheus receiver, the sum and count are cumulative, whereas for summaries
// coming from other sources, e.g. SDK, the sum and count are delta by being accumulated and reset periodically.
// In order to ensure metrics are sent as deltas, we check the receiver attribute (which can be injected by
-// attribute processor) from resource metrics. If it exists, and equals to prometheus, the sum and count will be
+// attribute processor) from resource metrics. If it exists, and is equal to prometheus, the sum and count will be
// converted.
// For more information: https://github.com/open-telemetry/opentelemetry-collector/blob/main/receiver/prometheusreceiver/DESIGN.md#summary
metricMetadata.adjustToDelta = metadata.receiver == prometheusReceiver
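Since the comment notes that the `receiver` attribute "can be injected by attribute processor", a pipeline wanting the delta conversion could add it with something like the following resource-processor snippet (a hedged sketch; verify the exact schema against that processor's README):

```yaml
processors:
  resource:
    attributes:
      - key: receiver
        value: prometheus
        action: insert   # only add the attribute if it is not already present
```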
8 changes: 4 additions & 4 deletions exporter/awsemfexporter/datapoint_test.go
@@ -1968,7 +1968,7 @@ func TestCreateLabels(t *testing.T) {
labels := createLabels(labelsMap, "")
assert.Equal(t, expectedLabels, labels)

-// With isntrumentation library name
+// With instrumentation library name
labels = createLabels(labelsMap, "cloudwatch-otel")
expectedLabels[oTellibDimensionKey] = "cloudwatch-otel"
assert.Equal(t, expectedLabels, labels)
@@ -1977,7 +1977,7 @@
func TestGetDataPoints(t *testing.T) {
logger := zap.NewNop()

-normalDeltraMetricMetadata := generateDeltaMetricMetadata(false, "foo", false)
+normalDeltaMetricMetadata := generateDeltaMetricMetadata(false, "foo", false)
cumulativeDeltaMetricMetadata := generateDeltaMetricMetadata(true, "foo", false)

testCases := []struct {
@@ -1991,7 +1991,7 @@
name: "Int gauge",
isPrometheusMetrics: false,
metric: generateTestGaugeMetric("foo", intValueType),
-expectedDatapointSlice: numberDataPointSlice{normalDeltraMetricMetadata, pmetric.NumberDataPointSlice{}},
+expectedDatapointSlice: numberDataPointSlice{normalDeltaMetricMetadata, pmetric.NumberDataPointSlice{}},
expectedAttributes: map[string]any{"label1": "value1"},
},
{
@@ -2019,7 +2019,7 @@
name: "Summary from SDK",
isPrometheusMetrics: false,
metric: generateTestSummaryMetric("foo"),
-expectedDatapointSlice: summaryDataPointSlice{normalDeltraMetricMetadata, pmetric.SummaryDataPointSlice{}},
+expectedDatapointSlice: summaryDataPointSlice{normalDeltaMetricMetadata, pmetric.SummaryDataPointSlice{}},
expectedAttributes: map[string]any{"label1": "value1"},
},
{
4 changes: 2 additions & 2 deletions exporter/awsemfexporter/emf_exporter_test.go
@@ -71,7 +71,7 @@ func TestConsumeMetricsWithNaNValues(t *testing.T) {
generateFunc func(string) pmetric.Metrics
}{
{
-"histograme-with-nan",
+"histogram-with-nan",
generateTestHistogramMetricWithNaNs,
}, {
"gauge-with-nan",
@@ -110,7 +110,7 @@ func TestConsumeMetricsWithInfValues(t *testing.T) {
generateFunc func(string) pmetric.Metrics
}{
{
-"histograme-with-inf",
+"histogram-with-inf",
generateTestHistogramMetricWithInfs,
}, {
"gauge-with-inf",
4 changes: 2 additions & 2 deletions exporter/awsemfexporter/grouped_metric_test.go
@@ -454,7 +454,7 @@ func TestAddKubernetesWrapper(t *testing.T) {
dockerObj := struct {
ContainerID string `json:"container_id"`
}{
-ContainerID: "Container mccontainter the third",
+ContainerID: "Container mccontainer the third",
}
expectedCreatedObj := struct {
ContainerName string `json:"container_name"`
@@ -469,7 +469,7 @@
}

inputs := make(map[string]string)
-inputs["container_id"] = "Container mccontainter the third"
+inputs["container_id"] = "Container mccontainer the third"
inputs["container"] = "container mccontainer"
inputs["NodeName"] = "hosty de la host"
inputs["PodId"] = "Le id de Pod"
2 changes: 1 addition & 1 deletion exporter/awskinesisexporter/README.md
@@ -13,7 +13,7 @@
<!-- end autogenerated section -->

The kinesis exporter currently exports dynamic encodings to the configured kinesis stream.
-The exporter relies heavily on the kinesis.PutRecords api to reduce network I/O and and reduces records into smallest atomic representation
+The exporter relies heavily on the kinesis.PutRecords api to reduce network I/O and reduces records into smallest atomic representation
to avoid hitting the hard limits placed on Records (No greater than 1Mb).
This producer will block until the operation is done to allow for retryable and queued data to help during high loads.

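The batching behavior this README describes — packing records into PutRecords calls while respecting the hard per-record limit — boils down to a greedy chunking loop. Below is a simplified, self-contained sketch of the idea, not the exporter's actual implementation (the real `Chunk` shown in the next hunk also tracks aggregate request size; the 500-records-per-call cap is the documented Kinesis PutRecords limit):

```go
package main

import "fmt"

const (
	maxRecordSize     = 1 << 20 // ~1 MB hard limit per Kinesis record
	maxRecordsPerCall = 500     // PutRecords accepts at most 500 records per request
)

// chunk greedily groups raw payloads into batches, one batch per PutRecords call.
func chunk(records [][]byte) [][][]byte {
	var batches [][][]byte
	var current [][]byte
	for _, r := range records {
		if len(r) > maxRecordSize {
			continue // an oversized record can never be sent; drop or split upstream
		}
		if len(current) == maxRecordsPerCall {
			batches = append(batches, current)
			current = nil
		}
		current = append(current, r)
	}
	if len(current) > 0 {
		batches = append(batches, current)
	}
	return batches
}

func main() {
	recs := [][]byte{[]byte("span-a"), []byte("span-b"), []byte("span-c")}
	fmt.Printf("%d batch(es) for %d records\n", len(chunk(recs)), len(recs))
}
```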
2 changes: 1 addition & 1 deletion exporter/awskinesisexporter/internal/batch/batch.go
@@ -101,7 +101,7 @@ func (b *Batch) AddRecord(raw []byte, key string) error {
return nil
}

-// Chunk breaks up the iternal queue into blocks that can be used
+// Chunk breaks up the internal queue into blocks that can be used
// to be written to he kinesis.PutRecords endpoint
func (b *Batch) Chunk() (chunks [][]types.PutRecordsRequestEntry) {
// Using local copies to avoid mutating internal data