diff --git a/internal/alloy/internal/testcomponents/module/module.go b/internal/alloy/internal/testcomponents/module/module.go index 3079d3c74e..6ea10c9ca6 100644 --- a/internal/alloy/internal/testcomponents/module/module.go +++ b/internal/alloy/internal/testcomponents/module/module.go @@ -40,7 +40,7 @@ func NewModuleComponent(o component.Options) (*ModuleComponent, error) { return c, err } -// LoadAlloySource loads the flow controller with the current component source. +// LoadAlloySource loads the controller with the current component source. // It will set the component health in addition to return the error so that the consumer can rely on either or both. // If the content is the same as the last time it was successfully loaded, it will not be reloaded. func (c *ModuleComponent) LoadAlloySource(args map[string]any, contentValue string) error { @@ -70,7 +70,7 @@ func (c *ModuleComponent) LoadAlloySource(args map[string]any, contentValue stri return nil } -// RunAlloyController runs the flow controller that all module components start. +// RunAlloyController runs the controller that all module components start. func (c *ModuleComponent) RunAlloyController(ctx context.Context) { err := c.mod.Run(ctx) if err != nil { diff --git a/internal/alloycli/flowmode.go b/internal/alloycli/alloycli.go similarity index 100% rename from internal/alloycli/flowmode.go rename to internal/alloycli/alloycli.go diff --git a/internal/cmd/alloylint/internal/findcomponents/findcomponents.go b/internal/cmd/alloylint/internal/findcomponents/findcomponents.go index 76d9386e9f..03b5b5a9b3 100644 --- a/internal/cmd/alloylint/internal/findcomponents/findcomponents.go +++ b/internal/cmd/alloylint/internal/findcomponents/findcomponents.go @@ -1,4 +1,4 @@ -// Package findcomponents exposes an Analyzer which ensures that created Flow +// Package findcomponents exposes an Analyzer which ensures that created // components are imported by a registry package. package findcomponents @@ -13,7 +13,7 @@ import ( var Analyzer = &analysis.Analyzer{ Name: "findcomponents", - Doc: "ensure Flow components are imported", + Doc: "ensure components are imported", Run: run, } @@ -67,8 +67,8 @@ func run(p *analysis.Pass) (interface{}, error) { } // findComponentPackages returns a map of discovered packages which declare -// Flow components. The pattern argument controls the full list of patterns -// which are searched (e.g., "./..." or "./component/..."). +// components. The pattern argument controls the full list of patterns which +// are searched (e.g., "./..." or "./component/..."). func findComponentPackages(pattern string) (map[string]struct{}, error) { pkgs, err := packages.Load(&packages.Config{ Mode: packages.NeedName | packages.NeedTypes | packages.NeedSyntax | packages.NeedTypesInfo, diff --git a/internal/component/common/config/selectors.go b/internal/component/common/config/selectors.go index 61489604b8..8c707f9527 100644 --- a/internal/component/common/config/selectors.go +++ b/internal/component/common/config/selectors.go @@ -12,7 +12,7 @@ type LabelSelector struct { MatchExpressions []MatchExpression `alloy:"match_expression,block,optional"` } -// BuildSelector builds a [labels.Selector] from a Flow LabelSelector. +// BuildSelector builds a [labels.Selector] from an Alloy LabelSelector. 
func (ls *LabelSelector) BuildSelector() (labels.Selector, error) { if ls == nil { return metav1.LabelSelectorAsSelector(nil) diff --git a/internal/component/common/loki/client/manager.go b/internal/component/common/loki/client/manager.go index b4ebbb3806..2b91d65210 100644 --- a/internal/component/common/loki/client/manager.go +++ b/internal/component/common/loki/client/manager.go @@ -66,12 +66,14 @@ func (p watcherClientPair) Stop(drain bool) { p.client.Stop() } -// Manager manages remote write client instantiation, and connects the related components to orchestrate the flow of loki.Entry -// from the scrape targets, to the remote write clients themselves. +// Manager manages remote write client instantiation, and connects the related +// components to orchestrate the flow of loki.Entry from the scrape targets, to +// the remote write clients themselves. // -// Right now it just supports instantiating the WAL writer side of the future-to-be WAL enabled client. In follow-up -// work, tracked in https://github.com/grafana/loki/issues/8197, this Manager will be responsible for instantiating all client -// types: Logger, Multi and WAL. +// Right now it just supports instantiating the WAL writer side of the +// future-to-be WAL enabled client. In follow-up work, tracked in +// https://github.com/grafana/loki/issues/8197, this Manager will be +// responsible for instantiating all client types: Logger, Multi and WAL. type Manager struct { name string diff --git a/internal/component/common/net/server.go b/internal/component/common/net/server.go index 1f526bd2d7..67ad00f807 100644 --- a/internal/component/common/net/server.go +++ b/internal/component/common/net/server.go @@ -11,9 +11,10 @@ import ( "github.com/prometheus/common/model" ) -// TargetServer is wrapper around dskit.Server that handles some common configuration used in all flow components -// that expose a network server. It just handles configuration and initialization, the handlers implementation are left -// to the consumer. +// TargetServer is a wrapper around dskit.Server that handles some common +// configuration used in all components that expose a network server. It just +// handles configuration and initialization; the handler implementations are +// left to the consumer. type TargetServer struct { logger log.Logger config *dskit.Config @@ -48,9 +49,10 @@ func NewTargetServer(logger log.Logger, metricsNamespace string, reg prometheus. ts.config = &serverCfg // To prevent metric collisions because all metrics are going to be registered in the global Prometheus registry. ts.config.MetricsNamespace = ts.metricsNamespace - // We don't want the /debug and /metrics endpoints running, since this is not the main Flow HTTP server. - // We want this target to expose the least surface area possible, hence disabling dskit HTTP server metrics - // and debugging functionality. + // We don't want the /debug and /metrics endpoints running, since this is not + // the main HTTP server. We want this target to expose the least surface area + // possible, hence disabling dskit HTTP server metrics and debugging + // functionality. ts.config.RegisterInstrumentation = false // Add logger to dskit ts.config.Log = ts.logger diff --git a/internal/component/component.go b/internal/component/component.go index 4bb22e9123..43bbb41481 100644 --- a/internal/component/component.go +++ b/internal/component/component.go @@ -1,6 +1,6 @@ -// Package component describes the interfaces which Flow components implement. 
+// Package component describes the interfaces which components implement. // -// A Flow component is a distinct piece of business logic that accepts inputs +// A component is a distinct piece of business logic that accepts inputs // (Arguments) for its configuration and can optionally export a set of outputs // (Exports). // @@ -23,7 +23,7 @@ // // The set of River element names of a given component's Arguments and Exports // types must not overlap. Additionally, the following River field and block -// names are reserved for use by the Flow controller: +// names are reserved for use by the Alloy controller: // // - for_each // - enabled @@ -76,9 +76,9 @@ type Arguments interface{} // Exports implementations. type Exports interface{} -// Component is the base interface for a Flow component. Components may -// implement extension interfaces (named Component) to implement -// extra known behavior. +// Component is the base interface for a component. Components may implement +// extension interfaces (named Component) to implement extra known +// behavior. type Component interface { // Run starts the component, blocking until ctx is canceled or the component // suffers a fatal error. Run is guaranteed to be called exactly once per diff --git a/internal/component/component_health.go b/internal/component/component_health.go index bfb13ae94d..6ce71fdec2 100644 --- a/internal/component/component_health.go +++ b/internal/component/component_health.go @@ -16,7 +16,7 @@ type HealthComponent interface { // CurrentHealth returns the current Health status for the component. // - // CurrentHealth may be overridden by the Flow controller if there is a + // CurrentHealth may be overridden by the Alloy controller if there is a // higher-level issue, such as a config file being invalid or a Component // shutting down unexpectedly. CurrentHealth() Health diff --git a/internal/component/discovery/discovery.go b/internal/component/discovery/discovery.go index fc30bbcccc..453a17bd77 100644 --- a/internal/component/discovery/discovery.go +++ b/internal/component/discovery/discovery.go @@ -21,7 +21,7 @@ import ( type Target map[string]string // DistributedTargets uses the node's Lookup method to distribute discovery -// targets when a Flow component runs in a cluster. +// targets when a component runs in a cluster. type DistributedTargets struct { useClustering bool cluster cluster.Cluster diff --git a/internal/component/discovery/relabel/relabel.go b/internal/component/discovery/relabel/relabel.go index c4f2c419db..65d271d462 100644 --- a/internal/component/discovery/relabel/relabel.go +++ b/internal/component/discovery/relabel/relabel.go @@ -5,7 +5,7 @@ import ( "sync" "github.com/grafana/alloy/internal/component" - flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + alloy_relabel "github.com/grafana/alloy/internal/component/common/relabel" "github.com/grafana/alloy/internal/component/discovery" "github.com/grafana/alloy/internal/featuregate" "github.com/prometheus/prometheus/model/labels" @@ -31,13 +31,13 @@ type Arguments struct { Targets []discovery.Target `alloy:"targets,attr"` // The relabelling rules to apply to each target's label set. - RelabelConfigs []*flow_relabel.Config `alloy:"rule,block,optional"` + RelabelConfigs []*alloy_relabel.Config `alloy:"rule,block,optional"` } // Exports holds values which are exported by the discovery.relabel component. 
type Exports struct { - Output []discovery.Target `alloy:"output,attr"` - Rules flow_relabel.Rules `alloy:"rules,attr"` + Output []discovery.Target `alloy:"output,attr"` + Rules alloy_relabel.Rules `alloy:"rules,attr"` } // Component implements the discovery.relabel component. @@ -76,7 +76,7 @@ func (c *Component) Update(args component.Arguments) error { newArgs := args.(Arguments) targets := make([]discovery.Target, 0, len(newArgs.Targets)) - relabelConfigs := flow_relabel.ComponentToPromRelabelConfigs(newArgs.RelabelConfigs) + relabelConfigs := alloy_relabel.ComponentToPromRelabelConfigs(newArgs.RelabelConfigs) c.rcs = relabelConfigs for _, t := range newArgs.Targets { diff --git a/internal/component/discovery/relabel/relabel_test.go b/internal/component/discovery/relabel/relabel_test.go index 4c217a39d5..5550c4d722 100644 --- a/internal/component/discovery/relabel/relabel_test.go +++ b/internal/component/discovery/relabel/relabel_test.go @@ -5,7 +5,7 @@ import ( "time" "github.com/grafana/alloy/internal/alloy/componenttest" - flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + alloy_relabel "github.com/grafana/alloy/internal/component/common/relabel" "github.com/grafana/alloy/internal/component/discovery" "github.com/grafana/alloy/internal/component/discovery/relabel" "github.com/grafana/alloy/syntax" @@ -118,8 +118,8 @@ rule { require.Len(t, gotOriginal, 1) require.Len(t, gotUpdated, 1) - require.Equal(t, gotOriginal[0].Action, flow_relabel.Keep) - require.Equal(t, gotUpdated[0].Action, flow_relabel.Drop) + require.Equal(t, gotOriginal[0].Action, alloy_relabel.Keep) + require.Equal(t, gotUpdated[0].Action, alloy_relabel.Drop) require.Equal(t, gotUpdated[0].SourceLabels, gotOriginal[0].SourceLabels) require.Equal(t, gotUpdated[0].Regex, gotOriginal[0].Regex) } diff --git a/internal/component/local/file_match/file_test.go b/internal/component/local/file_match/file_test.go index 8a19074c32..17839cf005 100644 --- a/internal/component/local/file_match/file_test.go +++ b/internal/component/local/file_match/file_test.go @@ -260,7 +260,7 @@ func createComponentWithLabels(t *testing.T, dir string, paths []string, exclude } c, err := New(component.Options{ ID: "test", - Logger: util.TestFlowLogger(t), + Logger: util.TestAlloyLogger(t), DataPath: dir, OnStateChange: func(e component.Exports) { diff --git a/internal/component/loki/process/process_test.go b/internal/component/loki/process/process_test.go index 456f4bbefd..b874f4a0de 100644 --- a/internal/component/loki/process/process_test.go +++ b/internal/component/loki/process/process_test.go @@ -74,7 +74,7 @@ func TestJSONLabelsStage(t *testing.T) { // Create and run the component, so that it can process and forwards logs. opts := component.Options{ - Logger: util.TestFlowLogger(t), + Logger: util.TestAlloyLogger(t), Registerer: prometheus.NewRegistry(), OnStateChange: func(e component.Exports) {}, } @@ -161,7 +161,7 @@ stage.label_keep { // Create and run the component, so that it can process and forwards logs. opts := component.Options{ - Logger: util.TestFlowLogger(t), + Logger: util.TestAlloyLogger(t), Registerer: prometheus.NewRegistry(), OnStateChange: func(e component.Exports) {}, } @@ -256,7 +256,7 @@ stage.labels { // Create and run the component, so that it can process and forwards logs. 
opts := component.Options{ - Logger: util.TestFlowLogger(t), + Logger: util.TestAlloyLogger(t), Registerer: prometheus.NewRegistry(), OnStateChange: func(e component.Exports) {}, } @@ -422,7 +422,7 @@ func TestDeadlockWithFrequentUpdates(t *testing.T) { // Create and run the component, so that it can process and forwards logs. opts := component.Options{ - Logger: util.TestFlowLogger(t), + Logger: util.TestAlloyLogger(t), Registerer: prometheus.NewRegistry(), OnStateChange: func(e component.Exports) {}, } diff --git a/internal/component/loki/process/stages/drop_test.go b/internal/component/loki/process/stages/drop_test.go index 55e5c2a9f9..f7fc44f8a5 100644 --- a/internal/component/loki/process/stages/drop_test.go +++ b/internal/component/loki/process/stages/drop_test.go @@ -415,7 +415,7 @@ func Test_dropStage_Process(t *testing.T) { if err != nil { t.Error(err) } - logger := util.TestFlowLogger(t) + logger := util.TestAlloyLogger(t) m, err := newDropStage(logger, *tt.config, prometheus.DefaultRegisterer) require.NoError(t, err) out := processEntries(m, newEntry(tt.extracted, tt.labels, tt.entry, tt.t)) @@ -431,7 +431,7 @@ func Test_dropStage_Process(t *testing.T) { func TestDropPipeline(t *testing.T) { registry := prometheus.NewRegistry() plName := "test_drop_pipeline" - logger := util.TestFlowLogger(t) + logger := util.TestAlloyLogger(t) pl, err := NewPipeline(logger, loadConfig(testDropRiver), &plName, registry) require.NoError(t, err) out := processEntries(pl, diff --git a/internal/component/loki/process/stages/json_test.go b/internal/component/loki/process/stages/json_test.go index 6e695cef68..b157aa815c 100644 --- a/internal/component/loki/process/stages/json_test.go +++ b/internal/component/loki/process/stages/json_test.go @@ -45,7 +45,7 @@ var testJSONLogLine = ` func TestPipeline_JSON(t *testing.T) { t.Parallel() - logger := util.TestFlowLogger(t) + logger := util.TestAlloyLogger(t) tests := map[string]struct { config string @@ -212,7 +212,7 @@ var logFixture = ` func TestJSONParser_Parse(t *testing.T) { t.Parallel() - logger := util.TestFlowLogger(t) + logger := util.TestAlloyLogger(t) var logString = "log" tests := map[string]struct { @@ -351,7 +351,7 @@ func TestJSONParser_Parse(t *testing.T) { } func TestValidateJSONDrop(t *testing.T) { - logger := util.TestFlowLogger(t) + logger := util.TestAlloyLogger(t) labels := map[string]string{"foo": "bar"} matchConfig := &JSONConfig{ DropMalformed: true, diff --git a/internal/component/loki/process/stages/logfmt_test.go b/internal/component/loki/process/stages/logfmt_test.go index ed620e73a9..6923466bef 100644 --- a/internal/component/loki/process/stages/logfmt_test.go +++ b/internal/component/loki/process/stages/logfmt_test.go @@ -128,7 +128,7 @@ var testLogfmtLogFixture = ` func TestLogfmtParser_Parse(t *testing.T) { t.Parallel() - logger := util.TestFlowLogger(t) + logger := util.TestAlloyLogger(t) tests := map[string]struct { config LogfmtConfig extracted map[string]interface{} diff --git a/internal/component/loki/process/stages/match_test.go b/internal/component/loki/process/stages/match_test.go index 5e0f52600b..9f8be461ca 100644 --- a/internal/component/loki/process/stages/match_test.go +++ b/internal/component/loki/process/stages/match_test.go @@ -64,7 +64,7 @@ var testMatchLogLineApp2 = ` func TestMatchStage(t *testing.T) { registry := prometheus.NewRegistry() plName := "test_match_pipeline" - logger := util.TestFlowLogger(t) + logger := util.TestAlloyLogger(t) pl, err := NewPipeline(logger, loadConfig(testMatchRiver), &plName, 
registry) if err != nil { t.Fatal(err) @@ -159,7 +159,7 @@ func TestMatcher(t *testing.T) { "", "", } - logger := util.TestFlowLogger(t) + logger := util.TestAlloyLogger(t) s, err := newMatcherStage(logger, nil, matchConfig, prometheus.DefaultRegisterer) if (err != nil) != tt.wantErr { t.Errorf("withMatcher() error = %v, wantErr %v", err, tt.wantErr) diff --git a/internal/component/loki/process/stages/multiline_test.go b/internal/component/loki/process/stages/multiline_test.go index d85bf3479c..c462b19a82 100644 --- a/internal/component/loki/process/stages/multiline_test.go +++ b/internal/component/loki/process/stages/multiline_test.go @@ -14,7 +14,7 @@ import ( ) func TestMultilineStageProcess(t *testing.T) { - logger := util.TestFlowLogger(t) + logger := util.TestAlloyLogger(t) mcfg := MultilineConfig{Expression: "^START", MaxWaitTime: 3 * time.Second} err := validateMultilineConfig(&mcfg) require.NoError(t, err) @@ -41,7 +41,7 @@ func TestMultilineStageProcess(t *testing.T) { } func TestMultilineStageMultiStreams(t *testing.T) { - logger := util.TestFlowLogger(t) + logger := util.TestAlloyLogger(t) mcfg := MultilineConfig{Expression: "^START", MaxWaitTime: 3 * time.Second} err := validateMultilineConfig(&mcfg) require.NoError(t, err) @@ -81,7 +81,7 @@ func TestMultilineStageMultiStreams(t *testing.T) { } func TestMultilineStageMaxWaitTime(t *testing.T) { - logger := util.TestFlowLogger(t) + logger := util.TestAlloyLogger(t) mcfg := MultilineConfig{Expression: "^START", MaxWaitTime: 100 * time.Millisecond} err := validateMultilineConfig(&mcfg) require.NoError(t, err) diff --git a/internal/component/loki/process/stages/output_test.go b/internal/component/loki/process/stages/output_test.go index d5ba6076db..eb06048fcc 100644 --- a/internal/component/loki/process/stages/output_test.go +++ b/internal/component/loki/process/stages/output_test.go @@ -43,7 +43,7 @@ var testOutputLogLineWithMissingKey = ` ` func TestPipeline_Output(t *testing.T) { - logger := util.TestFlowLogger(t) + logger := util.TestAlloyLogger(t) pl, err := NewPipeline(logger, loadConfig(testOutputRiver), nil, prometheus.DefaultRegisterer) require.NoError(t, err) diff --git a/internal/component/loki/process/stages/pack_test.go b/internal/component/loki/process/stages/pack_test.go index 43f6a172c5..a2602560f2 100644 --- a/internal/component/loki/process/stages/pack_test.go +++ b/internal/component/loki/process/stages/pack_test.go @@ -37,7 +37,7 @@ stage.match { func TestPackPipeline(t *testing.T) { registry := prometheus.NewRegistry() plName := "test_pack_pipeline" - logger := util.TestFlowLogger(t) + logger := util.TestAlloyLogger(t) pl, err := NewPipeline(logger, loadConfig(testPackRiver), &plName, registry) require.NoError(t, err) @@ -335,7 +335,7 @@ func TestPackStage(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - logger := util.TestFlowLogger(t) + logger := util.TestAlloyLogger(t) m := newPackStage(logger, *tt.config, prometheus.DefaultRegisterer) // Normal pipeline operation will put all the labels into the extracted map // replicate that here. 
diff --git a/internal/component/loki/process/stages/regex_test.go b/internal/component/loki/process/stages/regex_test.go index d17ce2e7a9..3adc3bd7d8 100644 --- a/internal/component/loki/process/stages/regex_test.go +++ b/internal/component/loki/process/stages/regex_test.go @@ -107,7 +107,7 @@ func TestPipeline_Regex(t *testing.T) { t.Run(testName, func(t *testing.T) { t.Parallel() - logger := util.TestFlowLogger(t) + logger := util.TestAlloyLogger(t) pl, err := NewPipeline(logger, loadConfig(testData.config), nil, prometheus.DefaultRegisterer) if err != nil { t.Fatal(err) @@ -298,7 +298,7 @@ func TestRegexParser_Parse(t *testing.T) { tt := tt t.Run(tName, func(t *testing.T) { t.Parallel() - logger := util.TestFlowLogger(t) + logger := util.TestAlloyLogger(t) p, err := New(logger, nil, StageConfig{RegexConfig: &tt.config}, nil) if err != nil { t.Fatalf("failed to create regex parser: %s", err) @@ -323,7 +323,7 @@ func BenchmarkRegexStage(b *testing.B) { } for _, bm := range benchmarks { b.Run(bm.name, func(b *testing.B) { - logger := util.TestFlowLogger(b) + logger := util.TestAlloyLogger(b) stage, err := New(logger, nil, StageConfig{RegexConfig: &bm.config}, nil) if err != nil { panic(err) diff --git a/internal/component/loki/process/stages/replace_test.go b/internal/component/loki/process/stages/replace_test.go index 3f940fbd70..2f2430c5ff 100644 --- a/internal/component/loki/process/stages/replace_test.go +++ b/internal/component/loki/process/stages/replace_test.go @@ -70,7 +70,7 @@ var testReplaceLogLineAdjacentCaptureGroups = `abc` func TestReplace(t *testing.T) { t.Parallel() - logger := util.TestFlowLogger(t) + logger := util.TestAlloyLogger(t) tests := map[string]struct { config string diff --git a/internal/component/loki/process/stages/timestamp_test.go b/internal/component/loki/process/stages/timestamp_test.go index 7e7de1887d..8e624e4009 100644 --- a/internal/component/loki/process/stages/timestamp_test.go +++ b/internal/component/loki/process/stages/timestamp_test.go @@ -42,7 +42,7 @@ var testTimestampLogLineWithMissingKey = ` ` func TestTimestampPipeline(t *testing.T) { - logger := util.TestFlowLogger(t) + logger := util.TestAlloyLogger(t) pl, err := NewPipeline(logger, loadConfig(testTimestampRiver), nil, prometheus.DefaultRegisterer) require.NoError(t, err) @@ -287,7 +287,7 @@ func TestTimestampStage_Process(t *testing.T) { test := test t.Run(name, func(t *testing.T) { t.Parallel() - logger := util.TestFlowLogger(t) + logger := util.TestAlloyLogger(t) st, err := newTimestampStage(logger, test.config) require.NoError(t, err) @@ -428,7 +428,7 @@ func TestTimestampStage_ProcessActionOnFailure(t *testing.T) { // Ensure the test has been correctly set require.Equal(t, len(testData.inputEntries), len(testData.expectedTimestamps)) - logger := util.TestFlowLogger(t) + logger := util.TestAlloyLogger(t) s, err := newTimestampStage(logger, testData.config) require.NoError(t, err) diff --git a/internal/component/loki/relabel/relabel.go b/internal/component/loki/relabel/relabel.go index dd1304b276..9a78a334d0 100644 --- a/internal/component/loki/relabel/relabel.go +++ b/internal/component/loki/relabel/relabel.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/alloy/internal/alloy/logging/level" "github.com/grafana/alloy/internal/component" "github.com/grafana/alloy/internal/component/common/loki" - flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + alloy_relabel "github.com/grafana/alloy/internal/component/common/relabel" 
"github.com/grafana/alloy/internal/featuregate" lru "github.com/hashicorp/golang-lru" "github.com/prometheus/common/model" @@ -35,7 +35,7 @@ type Arguments struct { ForwardTo []loki.LogsReceiver `alloy:"forward_to,attr"` // The relabelling rules to apply to each log entry before it's forwarded. - RelabelConfigs []*flow_relabel.Config `alloy:"rule,block,optional"` + RelabelConfigs []*alloy_relabel.Config `alloy:"rule,block,optional"` // The maximum number of items to hold in the component's LRU cache. MaxCacheSize int `alloy:"max_cache_size,attr,optional"` @@ -54,8 +54,8 @@ func (a *Arguments) SetToDefault() { // Exports holds values which are exported by the loki.relabel component. type Exports struct { - Receiver loki.LogsReceiver `alloy:"receiver,attr"` - Rules flow_relabel.Rules `alloy:"rules,attr"` + Receiver loki.LogsReceiver `alloy:"receiver,attr"` + Rules alloy_relabel.Rules `alloy:"rules,attr"` } // Component implements the loki.relabel component. @@ -136,7 +136,7 @@ func (c *Component) Update(args component.Arguments) error { defer c.mut.Unlock() newArgs := args.(Arguments) - newRCS := flow_relabel.ComponentToPromRelabelConfigs(newArgs.RelabelConfigs) + newRCS := alloy_relabel.ComponentToPromRelabelConfigs(newArgs.RelabelConfigs) if relabelingChanged(c.rcs, newRCS) { level.Debug(c.opts.Logger).Log("msg", "received new relabel configs, purging cache") c.cache.Purge() diff --git a/internal/component/loki/relabel/relabel_test.go b/internal/component/loki/relabel/relabel_test.go index 6de0322bbf..3d1f056562 100644 --- a/internal/component/loki/relabel/relabel_test.go +++ b/internal/component/loki/relabel/relabel_test.go @@ -10,7 +10,7 @@ import ( "github.com/grafana/alloy/internal/alloy/componenttest" "github.com/grafana/alloy/internal/component" "github.com/grafana/alloy/internal/component/common/loki" - flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + alloy_relabel "github.com/grafana/alloy/internal/component/common/relabel" "github.com/grafana/alloy/internal/component/discovery" lsf "github.com/grafana/alloy/internal/component/loki/source/file" "github.com/grafana/alloy/internal/util" @@ -45,7 +45,7 @@ func TestRelabeling(t *testing.T) { // an easy way to refer to a loki.LogsReceiver value for the forward_to // argument. type cfg struct { - Rcs []*flow_relabel.Config `alloy:"rule,block,optional"` + Rcs []*alloy_relabel.Config `alloy:"rule,block,optional"` } var relabelConfigs cfg err := syntax.Unmarshal([]byte(rc), &relabelConfigs) @@ -55,7 +55,7 @@ func TestRelabeling(t *testing.T) { // Create and run the component, so that it relabels and forwards logs. opts := component.Options{ - Logger: util.TestFlowLogger(t), + Logger: util.TestAlloyLogger(t), Registerer: prometheus.NewRegistry(), OnStateChange: func(e component.Exports) {}, } @@ -108,7 +108,7 @@ func TestRelabeling(t *testing.T) { func BenchmarkRelabelComponent(b *testing.B) { type cfg struct { - Rcs []*flow_relabel.Config `alloy:"rule,block,optional"` + Rcs []*alloy_relabel.Config `alloy:"rule,block,optional"` } var relabelConfigs cfg _ = syntax.Unmarshal([]byte(rc), &relabelConfigs) @@ -116,7 +116,7 @@ func BenchmarkRelabelComponent(b *testing.B) { // Create and run the component, so that it relabels and forwards logs. 
opts := component.Options{ - Logger: util.TestFlowLogger(b), + Logger: util.TestAlloyLogger(b), Registerer: prometheus.NewRegistry(), OnStateChange: func(e component.Exports) {}, } @@ -154,7 +154,7 @@ func BenchmarkRelabelComponent(b *testing.B) { func TestCache(t *testing.T) { type cfg struct { - Rcs []*flow_relabel.Config `alloy:"rule,block,optional"` + Rcs []*alloy_relabel.Config `alloy:"rule,block,optional"` } var relabelConfigs cfg err := syntax.Unmarshal([]byte(rc), &relabelConfigs) @@ -164,16 +164,16 @@ func TestCache(t *testing.T) { // Create and run the component, so that it relabels and forwards logs. opts := component.Options{ - Logger: util.TestFlowLogger(t), + Logger: util.TestAlloyLogger(t), Registerer: prometheus.NewRegistry(), OnStateChange: func(e component.Exports) {}, } args := Arguments{ ForwardTo: []loki.LogsReceiver{ch1}, - RelabelConfigs: []*flow_relabel.Config{ + RelabelConfigs: []*alloy_relabel.Config{ { SourceLabels: []string{"name", "A"}, - Regex: flow_relabel.Regexp(relabel.MustNewRegexp("(.+)")), + Regex: alloy_relabel.Regexp(relabel.MustNewRegexp("(.+)")), Action: "replace", TargetLabel: "env", @@ -419,8 +419,8 @@ func TestRuleGetter(t *testing.T) { require.Len(t, gotOriginal, 1) require.Len(t, gotUpdated, 1) - require.Equal(t, gotOriginal[0].Action, flow_relabel.Keep) - require.Equal(t, gotUpdated[0].Action, flow_relabel.Drop) + require.Equal(t, gotOriginal[0].Action, alloy_relabel.Keep) + require.Equal(t, gotUpdated[0].Action, alloy_relabel.Drop) require.Equal(t, gotUpdated[0].SourceLabels, gotOriginal[0].SourceLabels) require.Equal(t, gotUpdated[0].Regex, gotOriginal[0].Regex) } diff --git a/internal/component/loki/source/api/api_test.go b/internal/component/loki/source/api/api_test.go index bc4263d045..44ce84d3a2 100644 --- a/internal/component/loki/source/api/api_test.go +++ b/internal/component/loki/source/api/api_test.go @@ -389,7 +389,7 @@ func newTestLokiClient(t *testing.T, args Arguments, opts component.Options) cli func defaultOptions(t *testing.T) component.Options { return component.Options{ ID: "loki.source.api.test", - Logger: util.TestFlowLogger(t), + Logger: util.TestAlloyLogger(t), Registerer: prometheus.NewRegistry(), } } diff --git a/internal/component/loki/source/api/internal/lokipush/push_api_server.go b/internal/component/loki/source/api/internal/lokipush/push_api_server.go index a0340e5d4a..aeed301a4e 100644 --- a/internal/component/loki/source/api/internal/lokipush/push_api_server.go +++ b/internal/component/loki/source/api/internal/lokipush/push_api_server.go @@ -133,7 +133,7 @@ func (s *PushAPIServer) getRelabelRules() []*relabel.Config { } // NOTE: This code is copied from Promtail (https://github.com/grafana/loki/commit/47e2c5884f443667e64764f3fc3948f8f11abbb8) with changes kept to the minimum. -// Only the HTTP handler functions are copied to allow for flow-specific server configuration and lifecycle management. +// Only the HTTP handler functions are copied to allow for Alloy-specific server configuration and lifecycle management. func (s *PushAPIServer) handleLoki(w http.ResponseWriter, r *http.Request) { logger := util_log.WithContext(r.Context(), util_log.Logger) userID, _ := tenant.TenantID(r.Context()) @@ -209,7 +209,7 @@ func (s *PushAPIServer) handleLoki(w http.ResponseWriter, r *http.Request) { } // NOTE: This code is copied from Promtail (https://github.com/grafana/loki/commit/47e2c5884f443667e64764f3fc3948f8f11abbb8) with changes kept to the minimum. 
-// Only the HTTP handler functions are copied to allow for flow-specific server configuration and lifecycle management. +// Only the HTTP handler functions are copied to allow for Alloy-specific server configuration and lifecycle management. func (s *PushAPIServer) handlePlaintext(w http.ResponseWriter, r *http.Request) { entries := s.handler.Chan() defer r.Body.Close() @@ -245,7 +245,7 @@ func (s *PushAPIServer) handlePlaintext(w http.ResponseWriter, r *http.Request) } // NOTE: This code is copied from Promtail (https://github.com/grafana/loki/commit/47e2c5884f443667e64764f3fc3948f8f11abbb8) with changes kept to the minimum. -// Only the HTTP handler functions are copied to allow for flow-specific server configuration and lifecycle management. +// Only the HTTP handler functions are copied to allow for Alloy-specific server configuration and lifecycle management. func (s *PushAPIServer) ready(w http.ResponseWriter, r *http.Request) { resp := "ready" if _, err := w.Write([]byte(resp)); err != nil { diff --git a/internal/component/loki/source/aws_firehose/component.go b/internal/component/loki/source/aws_firehose/component.go index 274b00b100..8031d84b62 100644 --- a/internal/component/loki/source/aws_firehose/component.go +++ b/internal/component/loki/source/aws_firehose/component.go @@ -16,7 +16,7 @@ import ( "github.com/grafana/alloy/internal/component" "github.com/grafana/alloy/internal/component/common/loki" fnet "github.com/grafana/alloy/internal/component/common/net" - flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + alloy_relabel "github.com/grafana/alloy/internal/component/common/relabel" "github.com/grafana/alloy/internal/component/loki/source/aws_firehose/internal" "github.com/grafana/alloy/internal/util" ) @@ -38,7 +38,7 @@ type Arguments struct { AccessKey alloytypes.Secret `alloy:"access_key,attr,optional"` UseIncomingTimestamp bool `alloy:"use_incoming_timestamp,attr,optional"` ForwardTo []loki.LogsReceiver `alloy:"forward_to,attr"` - RelabelRules flow_relabel.Rules `alloy:"relabel_rules,attr,optional"` + RelabelRules alloy_relabel.Rules `alloy:"relabel_rules,attr,optional"` } // SetToDefault implements river.Defaulter. 
@@ -129,7 +129,7 @@ func (c *Component) Update(args component.Arguments) error { // then, if the relabel rules changed if newArgs.RelabelRules != nil && len(newArgs.RelabelRules) > 0 { handlerNeedsUpdate = true - newRelabels = flow_relabel.ComponentToPromRelabelConfigs(newArgs.RelabelRules) + newRelabels = alloy_relabel.ComponentToPromRelabelConfigs(newArgs.RelabelRules) } else if c.rbs != nil && len(c.rbs) > 0 && (newArgs.RelabelRules == nil || len(newArgs.RelabelRules) == 0) { // nil out relabel rules if they need to be cleared handlerNeedsUpdate = true diff --git a/internal/component/loki/source/aws_firehose/component_test.go b/internal/component/loki/source/aws_firehose/component_test.go index 05fa1ad0e8..8966a61113 100644 --- a/internal/component/loki/source/aws_firehose/component_test.go +++ b/internal/component/loki/source/aws_firehose/component_test.go @@ -18,7 +18,7 @@ import ( "github.com/grafana/alloy/internal/component" "github.com/grafana/alloy/internal/component/common/loki" fnet "github.com/grafana/alloy/internal/component/common/net" - flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + alloy_config "github.com/grafana/alloy/internal/component/common/relabel" "github.com/grafana/alloy/internal/util" ) @@ -59,7 +59,7 @@ func (r *receiver) run(ctx context.Context) { func TestComponent(t *testing.T) { opts := component.Options{ ID: "loki.source.awsfirehose", - Logger: util.TestFlowLogger(t), + Logger: util.TestAlloyLogger(t), Registerer: prometheus.NewRegistry(), OnStateChange: func(e component.Exports) {}, } @@ -137,7 +137,7 @@ func TestComponent(t *testing.T) { func TestComponent_UpdateWithNewArguments(t *testing.T) { opts := component.Options{ ID: "loki.source.awsfirehose", - Logger: util.TestFlowLogger(t), + Logger: util.TestAlloyLogger(t), Registerer: prometheus.NewRegistry(), OnStateChange: func(e component.Exports) {}, } @@ -169,13 +169,13 @@ func TestComponent_UpdateWithNewArguments(t *testing.T) { GRPC: &fnet.GRPCConfig{ListenPort: 0}, } args.ForwardTo = []loki.LogsReceiver{ch1} - args.RelabelRules = flow_relabel.Rules{ + args.RelabelRules = alloy_config.Rules{ { SourceLabels: []string{"__aws_firehose_source_arn"}, - Regex: flow_relabel.Regexp{Regexp: regexp.MustCompile("(.*)")}, + Regex: alloy_config.Regexp{Regexp: regexp.MustCompile("(.*)")}, Replacement: "$1", TargetLabel: "source_arn", - Action: flow_relabel.Replace, + Action: alloy_config.Replace, }, } diff --git a/internal/component/loki/source/azure_event_hubs/azure_event_hubs.go b/internal/component/loki/source/azure_event_hubs/azure_event_hubs.go index 6a420a066f..45b3413f36 100644 --- a/internal/component/loki/source/azure_event_hubs/azure_event_hubs.go +++ b/internal/component/loki/source/azure_event_hubs/azure_event_hubs.go @@ -10,7 +10,7 @@ import ( "github.com/grafana/alloy/internal/alloy/logging/level" "github.com/grafana/alloy/internal/component" "github.com/grafana/alloy/internal/component/common/loki" - flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + alloy_relabel "github.com/grafana/alloy/internal/component/common/relabel" "github.com/grafana/alloy/internal/component/loki/source/azure_event_hubs/internal/parser" kt "github.com/grafana/alloy/internal/component/loki/source/internal/kafkatarget" "github.com/grafana/alloy/internal/featuregate" @@ -38,12 +38,12 @@ type Arguments struct { Authentication AzureEventHubsAuthentication `alloy:"authentication,block"` - GroupID string `alloy:"group_id,attr,optional"` - UseIncomingTimestamp bool 
`alloy:"use_incoming_timestamp,attr,optional"` - DisallowCustomMessages bool `alloy:"disallow_custom_messages,attr,optional"` - RelabelRules flow_relabel.Rules `alloy:"relabel_rules,attr,optional"` - Labels map[string]string `alloy:"labels,attr,optional"` - Assignor string `alloy:"assignor,attr,optional"` + GroupID string `alloy:"group_id,attr,optional"` + UseIncomingTimestamp bool `alloy:"use_incoming_timestamp,attr,optional"` + DisallowCustomMessages bool `alloy:"disallow_custom_messages,attr,optional"` + RelabelRules alloy_relabel.Rules `alloy:"relabel_rules,attr,optional"` + Labels map[string]string `alloy:"labels,attr,optional"` + Assignor string `alloy:"assignor,attr,optional"` ForwardTo []loki.LogsReceiver `alloy:"forward_to,attr"` } @@ -163,7 +163,7 @@ func (a *Arguments) Convert() (kt.Config, error) { } cfg := kt.Config{ - RelabelConfigs: flow_relabel.ComponentToPromRelabelConfigs(a.RelabelRules), + RelabelConfigs: alloy_relabel.ComponentToPromRelabelConfigs(a.RelabelRules), KafkaConfig: kt.TargetConfig{ Brokers: []string{a.FullyQualifiedNamespace}, Topics: a.EventHubs, diff --git a/internal/component/loki/source/docker/docker.go b/internal/component/loki/source/docker/docker.go index 05f9af5097..f669bd46c7 100644 --- a/internal/component/loki/source/docker/docker.go +++ b/internal/component/loki/source/docker/docker.go @@ -20,7 +20,7 @@ import ( types "github.com/grafana/alloy/internal/component/common/config" "github.com/grafana/alloy/internal/component/common/loki" "github.com/grafana/alloy/internal/component/common/loki/positions" - flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + alloy_relabel "github.com/grafana/alloy/internal/component/common/relabel" "github.com/grafana/alloy/internal/component/discovery" dt "github.com/grafana/alloy/internal/component/loki/source/docker/internal/dockertarget" "github.com/grafana/alloy/internal/featuregate" @@ -57,7 +57,7 @@ type Arguments struct { Targets []discovery.Target `alloy:"targets,attr"` ForwardTo []loki.LogsReceiver `alloy:"forward_to,attr"` Labels map[string]string `alloy:"labels,attr,optional"` - RelabelRules flow_relabel.Rules `alloy:"relabel_rules,attr,optional"` + RelabelRules alloy_relabel.Rules `alloy:"relabel_rules,attr,optional"` HTTPClientConfig *types.HTTPClientConfig `alloy:"http_client_config,block,optional"` RefreshInterval time.Duration `alloy:"refresh_interval,attr,optional"` } @@ -210,7 +210,7 @@ func (c *Component) Update(args component.Arguments) error { c.defaultLabels = defaultLabels if newArgs.RelabelRules != nil && len(newArgs.RelabelRules) > 0 { - c.rcs = flow_relabel.ComponentToPromRelabelConfigs(newArgs.RelabelRules) + c.rcs = alloy_relabel.ComponentToPromRelabelConfigs(newArgs.RelabelRules) } else { c.rcs = []*relabel.Config{} } diff --git a/internal/component/loki/source/docker/docker_test.go b/internal/component/loki/source/docker/docker_test.go index 7fbfada83d..e020bcda09 100644 --- a/internal/component/loki/source/docker/docker_test.go +++ b/internal/component/loki/source/docker/docker_test.go @@ -65,7 +65,7 @@ func TestDuplicateTargets(t *testing.T) { cmp, err := New(component.Options{ ID: "loki.source.docker.test", - Logger: util.TestFlowLogger(t), + Logger: util.TestAlloyLogger(t), Registerer: prometheus.NewRegistry(), DataPath: t.TempDir(), }, args) diff --git a/internal/component/loki/source/file/file_test.go b/internal/component/loki/source/file/file_test.go index ad7a8c030d..816a81c738 100644 --- a/internal/component/loki/source/file/file_test.go +++ 
b/internal/component/loki/source/file/file_test.go @@ -175,7 +175,7 @@ func TestUpdate_NoLeak(t *testing.T) { func TestTwoTargets(t *testing.T) { // Create opts for component opts := component.Options{ - Logger: util.TestFlowLogger(t), + Logger: util.TestAlloyLogger(t), Registerer: prometheus.NewRegistry(), OnStateChange: func(e component.Exports) {}, DataPath: t.TempDir(), @@ -250,7 +250,7 @@ func TestTwoTargets(t *testing.T) { func TestEncoding(t *testing.T) { // Create opts for component opts := component.Options{ - Logger: util.TestFlowLogger(t), + Logger: util.TestAlloyLogger(t), Registerer: prometheus.NewRegistry(), OnStateChange: func(e component.Exports) {}, DataPath: t.TempDir(), diff --git a/internal/component/loki/source/file/tailer.go b/internal/component/loki/source/file/tailer.go index ebab33277a..78f3732351 100644 --- a/internal/component/loki/source/file/tailer.go +++ b/internal/component/loki/source/file/tailer.go @@ -253,7 +253,7 @@ func (t *tailer) readLines() { if err != nil { level.Debug(t.logger).Log("msg", "failed to convert encoding", "error", err) t.metrics.encodingFailures.WithLabelValues(t.path).Inc() - text = fmt.Sprintf("the requested encoding conversion for this line failed in Grafana Agent Flow: %s", err.Error()) + text = fmt.Sprintf("the requested encoding conversion for this line failed in Alloy: %s", err.Error()) } } else { text = line.Text diff --git a/internal/component/loki/source/gcplog/gcplog.go b/internal/component/loki/source/gcplog/gcplog.go index 836549d8cd..01a18dbc2b 100644 --- a/internal/component/loki/source/gcplog/gcplog.go +++ b/internal/component/loki/source/gcplog/gcplog.go @@ -13,7 +13,7 @@ import ( "github.com/grafana/alloy/internal/component" "github.com/grafana/alloy/internal/component/common/loki" - flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + alloy_relabel "github.com/grafana/alloy/internal/component/common/relabel" "github.com/grafana/alloy/internal/component/loki/source/gcplog/gcptypes" gt "github.com/grafana/alloy/internal/component/loki/source/gcplog/internal/gcplogtarget" "github.com/grafana/alloy/internal/util" @@ -37,7 +37,7 @@ type Arguments struct { PullTarget *gcptypes.PullConfig `alloy:"pull,block,optional"` PushTarget *gcptypes.PushConfig `alloy:"push,block,optional"` ForwardTo []loki.LogsReceiver `alloy:"forward_to,attr"` - RelabelRules flow_relabel.Rules `alloy:"relabel_rules,attr,optional"` + RelabelRules alloy_relabel.Rules `alloy:"relabel_rules,attr,optional"` } // SetToDefault implements river.Defaulter. 
@@ -122,7 +122,7 @@ func (c *Component) Update(args component.Arguments) error { var rcs []*relabel.Config if newArgs.RelabelRules != nil && len(newArgs.RelabelRules) > 0 { - rcs = flow_relabel.ComponentToPromRelabelConfigs(newArgs.RelabelRules) + rcs = alloy_relabel.ComponentToPromRelabelConfigs(newArgs.RelabelRules) } if c.target != nil { diff --git a/internal/component/loki/source/gcplog/gcplog_test.go b/internal/component/loki/source/gcplog/gcplog_test.go index 12fa8c5f1a..d119f0a126 100644 --- a/internal/component/loki/source/gcplog/gcplog_test.go +++ b/internal/component/loki/source/gcplog/gcplog_test.go @@ -17,7 +17,7 @@ import ( "github.com/grafana/alloy/internal/component" "github.com/grafana/alloy/internal/component/common/loki" fnet "github.com/grafana/alloy/internal/component/common/net" - flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + alloy_relabel "github.com/grafana/alloy/internal/component/common/relabel" "github.com/grafana/alloy/internal/component/loki/source/gcplog/gcptypes" "github.com/grafana/alloy/internal/util" ) @@ -28,7 +28,7 @@ func TestPull(t *testing.T) {} func TestPush(t *testing.T) { opts := component.Options{ - Logger: util.TestFlowLogger(t), + Logger: util.TestAlloyLogger(t), Registerer: prometheus.NewRegistry(), OnStateChange: func(e component.Exports) {}, } @@ -104,27 +104,27 @@ const testPushPayload = ` "subscription": "projects/test-project/subscriptions/test" }` -var exportedRules = flow_relabel.Rules{ +var exportedRules = alloy_relabel.Rules{ { SourceLabels: []string{"__gcp_message_id"}, Regex: mustNewRegexp("(.*)"), - Action: flow_relabel.Replace, + Action: alloy_relabel.Replace, Replacement: "$1", TargetLabel: "message_id", }, { SourceLabels: []string{"__gcp_resource_type"}, Regex: mustNewRegexp("(.*)"), - Action: flow_relabel.Replace, + Action: alloy_relabel.Replace, Replacement: "$1", TargetLabel: "resource_type", }, } -func mustNewRegexp(s string) flow_relabel.Regexp { +func mustNewRegexp(s string) alloy_relabel.Regexp { re, err := regexp.Compile("^(?:" + s + ")$") if err != nil { panic(err) } - return flow_relabel.Regexp{Regexp: re} + return alloy_relabel.Regexp{Regexp: re} } diff --git a/internal/component/loki/source/gelf/gelf.go b/internal/component/loki/source/gelf/gelf.go index 0e1343654e..a7f851dcc9 100644 --- a/internal/component/loki/source/gelf/gelf.go +++ b/internal/component/loki/source/gelf/gelf.go @@ -6,7 +6,7 @@ import ( "github.com/grafana/alloy/internal/component" "github.com/grafana/alloy/internal/component/common/loki" - flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + alloy_relabel "github.com/grafana/alloy/internal/component/common/relabel" "github.com/grafana/alloy/internal/component/loki/source/gelf/internal/target" "github.com/grafana/alloy/internal/featuregate" "github.com/grafana/loki/clients/pkg/promtail/scrapeconfig" @@ -78,7 +78,7 @@ func (c *Component) Update(args component.Arguments) error { var rcs []*relabel.Config if newArgs.RelabelRules != nil && len(newArgs.RelabelRules) > 0 { - rcs = flow_relabel.ComponentToPromRelabelConfigs(newArgs.RelabelRules) + rcs = alloy_relabel.ComponentToPromRelabelConfigs(newArgs.RelabelRules) } t, err := target.NewTarget(c.metrics, c.o.Logger, c.handler, rcs, convertConfig(newArgs)) @@ -94,7 +94,7 @@ type Arguments struct { // ListenAddress only supports UDP. 
ListenAddress string `alloy:"listen_address,attr,optional"` UseIncomingTimestamp bool `alloy:"use_incoming_timestamp,attr,optional"` - RelabelRules flow_relabel.Rules `alloy:"relabel_rules,attr,optional"` + RelabelRules alloy_relabel.Rules `alloy:"relabel_rules,attr,optional"` Receivers []loki.LogsReceiver `alloy:"forward_to,attr"` } diff --git a/internal/component/loki/source/gelf/gelf_test.go b/internal/component/loki/source/gelf/gelf_test.go index b948528d1c..ae0894b197 100644 --- a/internal/component/loki/source/gelf/gelf_test.go +++ b/internal/component/loki/source/gelf/gelf_test.go @@ -18,7 +18,7 @@ import ( func TestGelf(t *testing.T) { opts := component.Options{ - Logger: util.TestFlowLogger(t), + Logger: util.TestAlloyLogger(t), Registerer: prometheus.NewRegistry(), OnStateChange: func(e component.Exports) {}, } diff --git a/internal/component/loki/source/heroku/heroku.go b/internal/component/loki/source/heroku/heroku.go index 800f4a9e82..317bc41093 100644 --- a/internal/component/loki/source/heroku/heroku.go +++ b/internal/component/loki/source/heroku/heroku.go @@ -9,7 +9,7 @@ import ( "github.com/grafana/alloy/internal/component" "github.com/grafana/alloy/internal/component/common/loki" fnet "github.com/grafana/alloy/internal/component/common/net" - flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + alloy_relabel "github.com/grafana/alloy/internal/component/common/relabel" ht "github.com/grafana/alloy/internal/component/loki/source/heroku/internal/herokutarget" "github.com/grafana/alloy/internal/featuregate" "github.com/grafana/alloy/internal/util" @@ -37,7 +37,7 @@ type Arguments struct { Labels map[string]string `alloy:"labels,attr,optional"` UseIncomingTimestamp bool `alloy:"use_incoming_timestamp,attr,optional"` ForwardTo []loki.LogsReceiver `alloy:"forward_to,attr"` - RelabelRules flow_relabel.Rules `alloy:"relabel_rules,attr,optional"` + RelabelRules alloy_relabel.Rules `alloy:"relabel_rules,attr,optional"` } // SetToDefault implements river.Defaulter. 
@@ -123,7 +123,7 @@ func (c *Component) Update(args component.Arguments) error { var rcs []*relabel.Config if newArgs.RelabelRules != nil && len(newArgs.RelabelRules) > 0 { - rcs = flow_relabel.ComponentToPromRelabelConfigs(newArgs.RelabelRules) + rcs = alloy_relabel.ComponentToPromRelabelConfigs(newArgs.RelabelRules) } restartRequired := changed(c.args.Server, newArgs.Server) || diff --git a/internal/component/loki/source/heroku/heroku_test.go b/internal/component/loki/source/heroku/heroku_test.go index a562e10ab1..57955de9be 100644 --- a/internal/component/loki/source/heroku/heroku_test.go +++ b/internal/component/loki/source/heroku/heroku_test.go @@ -11,7 +11,7 @@ import ( "github.com/grafana/alloy/internal/component" "github.com/grafana/alloy/internal/component/common/loki" fnet "github.com/grafana/alloy/internal/component/common/net" - flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + alloy_relabel "github.com/grafana/alloy/internal/component/common/relabel" "github.com/grafana/alloy/internal/component/loki/source/heroku/internal/herokutarget" "github.com/grafana/alloy/internal/util" "github.com/grafana/regexp" @@ -120,7 +120,7 @@ func TestUpdate_detectsWhenTargetRequiresARestart(t *testing.T) { name: "change in relabel rules requires server restart", args: testArgsWithPorts(httpPort, grpcPort), newArgs: testArgsWith(t, func(args *Arguments) { - args.RelabelRules = flow_relabel.Rules{} + args.RelabelRules = alloy_relabel.Rules{} args.Server.HTTP.ListenPort = httpPort args.Server.GRPC.ListenPort = grpcPort }), @@ -166,32 +166,32 @@ func TestUpdate_detectsWhenTargetRequiresARestart(t *testing.T) { const testPayload = `270 <158>1 2022-06-13T14:52:23.622778+00:00 host heroku router - at=info method=GET path="/" host=cryptic-cliffs-27764.herokuapp.com request_id=59da6323-2bc4-4143-8677-cc66ccfb115f fwd="181.167.87.140" dyno=web.1 connect=0ms service=3ms status=200 bytes=6979 protocol=https ` -var rulesExport = flow_relabel.Rules{ +var rulesExport = alloy_relabel.Rules{ { SourceLabels: []string{"__heroku_drain_host"}, Regex: newRegexp(), - Action: flow_relabel.Replace, + Action: alloy_relabel.Replace, Replacement: "$1", TargetLabel: "host", }, { SourceLabels: []string{"__heroku_drain_app"}, Regex: newRegexp(), - Action: flow_relabel.Replace, + Action: alloy_relabel.Replace, Replacement: "$1", TargetLabel: "app", }, { SourceLabels: []string{"__heroku_drain_proc"}, Regex: newRegexp(), - Action: flow_relabel.Replace, + Action: alloy_relabel.Replace, Replacement: "$1", TargetLabel: "proc", }, { SourceLabels: []string{"__heroku_drain_log_id"}, Regex: newRegexp(), - Action: flow_relabel.Replace, + Action: alloy_relabel.Replace, Replacement: "$1", TargetLabel: "log_id", }, @@ -199,7 +199,7 @@ var rulesExport = flow_relabel.Rules{ func defaultOptions(t *testing.T) component.Options { return component.Options{ - Logger: util.TestFlowLogger(t), + Logger: util.TestAlloyLogger(t), Registerer: prometheus.NewRegistry(), OnStateChange: func(e component.Exports) {}, } @@ -219,11 +219,11 @@ func testArgsWithPorts(httpPort int, grpcPort int) Arguments { }, ForwardTo: []loki.LogsReceiver{loki.NewLogsReceiver(), loki.NewLogsReceiver()}, Labels: map[string]string{"foo": "bar", "fizz": "buzz"}, - RelabelRules: flow_relabel.Rules{ + RelabelRules: alloy_relabel.Rules{ { SourceLabels: []string{"tag"}, - Regex: flow_relabel.Regexp{Regexp: regexp.MustCompile("ignore")}, - Action: flow_relabel.Drop, + Regex: alloy_relabel.Regexp{Regexp: regexp.MustCompile("ignore")}, + Action: 
alloy_relabel.Drop, }, }, UseIncomingTimestamp: false, @@ -252,12 +252,12 @@ func getFreePort(t *testing.T) int { return port } -func newRegexp() flow_relabel.Regexp { +func newRegexp() alloy_relabel.Regexp { re, err := regexp.Compile("^(?:(.*))$") if err != nil { panic(err) } - return flow_relabel.Regexp{Regexp: re} + return alloy_relabel.Regexp{Regexp: re} } func getEndpoint(target *herokutarget.HerokuTarget) string { diff --git a/internal/component/loki/source/journal/journal.go b/internal/component/loki/source/journal/journal.go index 971f6bfe4e..3617e79bb9 100644 --- a/internal/component/loki/source/journal/journal.go +++ b/internal/component/loki/source/journal/journal.go @@ -11,7 +11,7 @@ import ( "github.com/grafana/alloy/internal/component/common/loki" "github.com/grafana/alloy/internal/component/common/loki/positions" - flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + alloy_relabel "github.com/grafana/alloy/internal/component/common/relabel" "github.com/grafana/alloy/internal/component/loki/source/journal/internal/target" "github.com/grafana/alloy/internal/featuregate" "github.com/grafana/loki/clients/pkg/promtail/scrapeconfig" @@ -112,7 +112,7 @@ func (c *Component) Update(args component.Arguments) error { return err } } - rcs := flow_relabel.ComponentToPromRelabelConfigs(newArgs.RelabelRules) + rcs := alloy_relabel.ComponentToPromRelabelConfigs(newArgs.RelabelRules) entryHandler := loki.NewEntryHandler(c.handler, func() {}) newTarget, err := target.NewJournalTarget(c.metrics, c.o.Logger, entryHandler, c.positions, c.o.ID, rcs, convertArgs(c.o.ID, newArgs)) diff --git a/internal/component/loki/source/journal/journal_test.go b/internal/component/loki/source/journal/journal_test.go index d4da349bdb..f4de1fe5c4 100644 --- a/internal/component/loki/source/journal/journal_test.go +++ b/internal/component/loki/source/journal/journal_test.go @@ -22,7 +22,7 @@ func TestJournal(t *testing.T) { lr := loki.NewLogsReceiver() c, err := New(component.Options{ ID: "loki.source.journal.test", - Logger: util.TestFlowLogger(t), + Logger: util.TestAlloyLogger(t), DataPath: tmp, Registerer: prometheus.DefaultRegisterer, }, Arguments{ diff --git a/internal/component/loki/source/journal/types.go b/internal/component/loki/source/journal/types.go index d4b1bd673b..35f54b56ec 100644 --- a/internal/component/loki/source/journal/types.go +++ b/internal/component/loki/source/journal/types.go @@ -4,7 +4,7 @@ import ( "time" "github.com/grafana/alloy/internal/component/common/loki" - flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + alloy_relabel "github.com/grafana/alloy/internal/component/common/relabel" ) // Arguments are the arguments for the component. 
@@ -12,7 +12,7 @@ type Arguments struct { FormatAsJson bool `alloy:"format_as_json,attr,optional"` MaxAge time.Duration `alloy:"max_age,attr,optional"` Path string `alloy:"path,attr,optional"` - RelabelRules flow_relabel.Rules `alloy:"relabel_rules,attr,optional"` + RelabelRules alloy_relabel.Rules `alloy:"relabel_rules,attr,optional"` Matches string `alloy:"matches,attr,optional"` Receivers []loki.LogsReceiver `alloy:"forward_to,attr"` Labels map[string]string `alloy:"labels,attr,optional"` diff --git a/internal/component/loki/source/kafka/kafka.go b/internal/component/loki/source/kafka/kafka.go index 9167b24137..65bde3c2e8 100644 --- a/internal/component/loki/source/kafka/kafka.go +++ b/internal/component/loki/source/kafka/kafka.go @@ -9,7 +9,7 @@ import ( "github.com/grafana/alloy/internal/component" "github.com/grafana/alloy/internal/component/common/config" "github.com/grafana/alloy/internal/component/common/loki" - flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + alloy_relabel "github.com/grafana/alloy/internal/component/common/relabel" kt "github.com/grafana/alloy/internal/component/loki/source/internal/kafkatarget" "github.com/grafana/alloy/internal/featuregate" "github.com/grafana/alloy/syntax/alloytypes" @@ -42,7 +42,7 @@ type Arguments struct { Labels map[string]string `alloy:"labels,attr,optional"` ForwardTo []loki.LogsReceiver `alloy:"forward_to,attr"` - RelabelRules flow_relabel.Rules `alloy:"relabel_rules,attr,optional"` + RelabelRules alloy_relabel.Rules `alloy:"relabel_rules,attr,optional"` } // KafkaAuthentication describe the configuration for authentication with Kafka brokers @@ -190,7 +190,7 @@ func (args *Arguments) Convert() kt.Config { Assignor: args.Assignor, Authentication: args.Authentication.Convert(), }, - RelabelConfigs: flow_relabel.ComponentToPromRelabelConfigs(args.RelabelRules), + RelabelConfigs: alloy_relabel.ComponentToPromRelabelConfigs(args.RelabelRules), } } diff --git a/internal/component/loki/source/syslog/syslog.go b/internal/component/loki/source/syslog/syslog.go index 5c4643df3d..695ff5269e 100644 --- a/internal/component/loki/source/syslog/syslog.go +++ b/internal/component/loki/source/syslog/syslog.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/alloy/internal/alloy/logging/level" "github.com/grafana/alloy/internal/component" "github.com/grafana/alloy/internal/component/common/loki" - flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + alloy_relabel "github.com/grafana/alloy/internal/component/common/relabel" st "github.com/grafana/alloy/internal/component/loki/source/syslog/internal/syslogtarget" "github.com/grafana/alloy/internal/featuregate" "github.com/prometheus/prometheus/model/relabel" @@ -31,7 +31,7 @@ func init() { type Arguments struct { SyslogListeners []ListenerConfig `alloy:"listener,block"` ForwardTo []loki.LogsReceiver `alloy:"forward_to,attr"` - RelabelRules flow_relabel.Rules `alloy:"relabel_rules,attr,optional"` + RelabelRules alloy_relabel.Rules `alloy:"relabel_rules,attr,optional"` } // Component implements the loki.source.syslog component. 
@@ -102,7 +102,7 @@ func (c *Component) Update(args component.Arguments) error { var rcs []*relabel.Config if newArgs.RelabelRules != nil && len(newArgs.RelabelRules) > 0 { - rcs = flow_relabel.ComponentToPromRelabelConfigs(newArgs.RelabelRules) + rcs = alloy_relabel.ComponentToPromRelabelConfigs(newArgs.RelabelRules) } if listenersChanged(c.args.SyslogListeners, newArgs.SyslogListeners) || relabelRulesChanged(c.args.RelabelRules, newArgs.RelabelRules) { @@ -159,6 +159,6 @@ type listenerInfo struct { func listenersChanged(prev, next []ListenerConfig) bool { return !reflect.DeepEqual(prev, next) } -func relabelRulesChanged(prev, next flow_relabel.Rules) bool { +func relabelRulesChanged(prev, next alloy_relabel.Rules) bool { return !reflect.DeepEqual(prev, next) } diff --git a/internal/component/loki/source/syslog/syslog_test.go b/internal/component/loki/source/syslog/syslog_test.go index a6dc560035..a15ed4f7b6 100644 --- a/internal/component/loki/source/syslog/syslog_test.go +++ b/internal/component/loki/source/syslog/syslog_test.go @@ -10,7 +10,7 @@ import ( "github.com/grafana/alloy/internal/component" "github.com/grafana/alloy/internal/component/common/loki" - flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + alloy_relabel "github.com/grafana/alloy/internal/component/common/relabel" "github.com/grafana/alloy/internal/util" "github.com/grafana/regexp" "github.com/phayes/freeport" @@ -21,7 +21,7 @@ import ( func Test(t *testing.T) { opts := component.Options{ - Logger: util.TestFlowLogger(t), + Logger: util.TestAlloyLogger(t), Registerer: prometheus.NewRegistry(), OnStateChange: func(e component.Exports) {}, } @@ -103,7 +103,7 @@ func Test(t *testing.T) { func TestWithRelabelRules(t *testing.T) { opts := component.Options{ - Logger: util.TestFlowLogger(t), + Logger: util.TestAlloyLogger(t), Registerer: prometheus.NewRegistry(), OnStateChange: func(e component.Exports) {}, } @@ -121,16 +121,16 @@ func TestWithRelabelRules(t *testing.T) { args.ForwardTo = []loki.LogsReceiver{ch1} // Create a handler which will be used to retrieve relabeling rules. 
- args.RelabelRules = []*flow_relabel.Config{ + args.RelabelRules = []*alloy_relabel.Config{ { SourceLabels: []string{"__name__"}, Regex: mustNewRegexp("__syslog_(.*)"), - Action: flow_relabel.LabelMap, + Action: alloy_relabel.LabelMap, Replacement: "syslog_${1}", }, { Regex: mustNewRegexp("syslog_connection_hostname"), - Action: flow_relabel.LabelDrop, + Action: alloy_relabel.LabelDrop, }, } @@ -194,10 +194,10 @@ var ( fmtNewline = func(s string) string { return s + "\n" } ) -func mustNewRegexp(s string) flow_relabel.Regexp { +func mustNewRegexp(s string) alloy_relabel.Regexp { re, err := regexp.Compile("^(?:" + s + ")$") if err != nil { panic(err) } - return flow_relabel.Regexp{Regexp: re} + return alloy_relabel.Regexp{Regexp: re} } diff --git a/internal/component/loki/source/windowsevent/component_test.go b/internal/component/loki/source/windowsevent/component_test.go index 45a4f15d50..ebd35bab57 100644 --- a/internal/component/loki/source/windowsevent/component_test.go +++ b/internal/component/loki/source/windowsevent/component_test.go @@ -27,7 +27,7 @@ func TestEventLogger(t *testing.T) { rec := loki.NewLogsReceiver() c, err := New(component.Options{ ID: "loki.source.windowsevent.test", - Logger: util.TestFlowLogger(t), + Logger: util.TestAlloyLogger(t), DataPath: dataPath, OnStateChange: func(e component.Exports) { diff --git a/internal/component/otelcol/auth/auth.go b/internal/component/otelcol/auth/auth.go index 6220f8fe54..ed6d857def 100644 --- a/internal/component/otelcol/auth/auth.go +++ b/internal/component/otelcol/auth/auth.go @@ -1,7 +1,7 @@ -// Package auth provides utilities to create a Flow component from +// Package auth provides utilities to create an Alloy component from // OpenTelemetry Collector authentication extensions. // -// Other OpenTelemetry Collector extensions are better served as generic Flow +// Other OpenTelemetry Collector extensions are better served as generic Alloy // components rather than being placed in the otelcol namespace. package auth @@ -40,7 +40,7 @@ type Arguments interface { Exporters() map[otelcomponent.DataType]map[otelcomponent.ID]otelcomponent.Component } -// Exports is a common Exports type for Flow components which expose +// Exports is a common Exports type for Alloy components which expose // OpenTelemetry Collector authentication extensions. type Exports struct { // Handler is the managed component. Handler is updated any time the @@ -59,7 +59,7 @@ var _ syntax.Capsule = Handler{} // RiverCapsule marks Handler as a capsule type. func (Handler) RiverCapsule() {} -// Auth is a Flow component shim which manages an OpenTelemetry Collector +// Auth is an Alloy component shim which manages an OpenTelemetry Collector // authentication extension. type Auth struct { ctx context.Context @@ -77,9 +77,9 @@ var ( _ component.HealthComponent = (*Auth)(nil) ) -// New creates a new Flow component which encapsulates an OpenTelemetry +// New creates a new Alloy component which encapsulates an OpenTelemetry // Collector authentication extension. args must hold a value of the argument -// type registered with the Flow component. +// type registered with the Alloy component. // // The registered component must be registered to export the Exports type from // this package, otherwise New will panic. 
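The recurring test-side change in this patch is the rename `util.TestFlowLogger` → `util.TestAlloyLogger`. As a point of reference, this is the minimal `component.Options` shape those tests build, shown with the renamed helper; a hypothetical standalone test for illustration, not code from this change.

```go
package example_test

import (
	"testing"

	"github.com/grafana/alloy/internal/component"
	"github.com/grafana/alloy/internal/util"
	"github.com/prometheus/client_golang/prometheus"
)

func TestOptions(t *testing.T) {
	// util.TestAlloyLogger replaces util.TestFlowLogger throughout this change.
	opts := component.Options{
		ID:            "loki.source.syslog.example", // hypothetical component ID
		Logger:        util.TestAlloyLogger(t),
		Registerer:    prometheus.NewRegistry(),
		OnStateChange: func(e component.Exports) {},
	}
	_ = opts
}
```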
diff --git a/internal/component/otelcol/auth/auth_test.go b/internal/component/otelcol/auth/auth_test.go index 23a4ec1b01..d02c6966c3 100644 --- a/internal/component/otelcol/auth/auth_test.go +++ b/internal/component/otelcol/auth/auth_test.go @@ -23,7 +23,7 @@ func TestAuth(t *testing.T) { } ) - // Create and start our Flow component. We then wait for it to export a + // Create and start our Alloy component. We then wait for it to export a // consumer that we can send data to. te := newTestEnvironment(t, onCreated) te.Start(fakeAuthArgs{}) diff --git a/internal/component/otelcol/connector/connector.go b/internal/component/otelcol/connector/connector.go index 63a1a38979..4b229a54c5 100644 --- a/internal/component/otelcol/connector/connector.go +++ b/internal/component/otelcol/connector/connector.go @@ -1,4 +1,4 @@ -// Package connector exposes utilities to create a Flow component from +// Package connector exposes utilities to create an Alloy component from // OpenTelemetry Collector connectors. package connector @@ -58,8 +58,8 @@ type Arguments interface { ConnectorType() int } -// Connector is a Flow component shim which manages an OpenTelemetry Collector -// connector component. +// Connector is an Alloy component shim which manages an OpenTelemetry +// Collector connector component. type Connector struct { ctx context.Context cancel context.CancelFunc @@ -77,9 +77,9 @@ var ( _ component.HealthComponent = (*Connector)(nil) ) -// New creates a new Flow component which encapsulates an OpenTelemetry +// New creates a new Alloy component which encapsulates an OpenTelemetry // Collector connector. args must hold a value of the argument type registered -// with the Flow component. +// with the Alloy component. // // The registered component must be registered to export the // otelcol.ConsumerExports type, otherwise New will panic. diff --git a/internal/component/otelcol/consumer.go b/internal/component/otelcol/consumer.go index c7a385ba6f..e538b5a5ba 100644 --- a/internal/component/otelcol/consumer.go +++ b/internal/component/otelcol/consumer.go @@ -12,7 +12,7 @@ type Consumer interface { otelconsumer.Logs } -// ConsumerArguments is a common Arguments type for Flow components which can +// ConsumerArguments is a common Arguments type for Alloy components which can // send data to otelcol consumers. // // It is expected to use ConsumerArguments as a block within the top-level @@ -23,7 +23,7 @@ type ConsumerArguments struct { Traces []Consumer `alloy:"traces,attr,optional"` } -// ConsumerExports is a common Exports type for Flow components which are +// ConsumerExports is a common Exports type for Alloy components which are // otelcol processors or otelcol exporters. type ConsumerExports struct { Input Consumer `alloy:"input,attr"` diff --git a/internal/component/otelcol/exporter/exporter.go b/internal/component/otelcol/exporter/exporter.go index 3cbdfa3fa3..dde372ed76 100644 --- a/internal/component/otelcol/exporter/exporter.go +++ b/internal/component/otelcol/exporter/exporter.go @@ -1,4 +1,4 @@ -// Package exporter exposes utilities to create a Flow component from +// Package exporter exposes utilities to create an Alloy component from // OpenTelemetry Collector exporters. package exporter @@ -71,7 +71,7 @@ func (s TypeSignal) SupportsTraces() bool { return s&TypeTraces != 0 } -// Exporter is a Flow component shim which manages an OpenTelemetry Collector +// Exporter is an Alloy component shim which manages an OpenTelemetry Collector // exporter component. 
type Exporter struct { ctx context.Context @@ -94,9 +94,9 @@ var ( _ component.HealthComponent = (*Exporter)(nil) ) -// New creates a new Flow component which encapsulates an OpenTelemetry -// Collector exporter. args must hold a value of the argument type registered -// with the Flow component. +// New creates a new component which encapsulates an OpenTelemetry Collector +// exporter. args must hold a value of the argument type registered with the +// Alloy component. // // The registered component must be registered to export the // otelcol.ConsumerExports type, otherwise New will panic. diff --git a/internal/component/otelcol/exporter/exporter_test.go b/internal/component/otelcol/exporter/exporter_test.go index 01c0657b33..ee0306abd7 100644 --- a/internal/component/otelcol/exporter/exporter_test.go +++ b/internal/component/otelcol/exporter/exporter_test.go @@ -26,7 +26,7 @@ func TestExporter(t *testing.T) { tracesCh := make(chan ptrace.Traces, 1) // Create an instance of a fake OpenTelemetry Collector exporter which our - // Flow component will wrap around. + // Alloy component will wrap around. innerExporter := &fakeExporter{ ConsumeTracesFunc: func(_ context.Context, td ptrace.Traces) error { select { @@ -37,7 +37,7 @@ func TestExporter(t *testing.T) { }, } - // Create and start our Flow component. We then wait for it to export a + // Create and start our Alloy component. We then wait for it to export a // consumer that we can send data to. te := newTestEnvironment(t, innerExporter) te.Start() diff --git a/internal/component/otelcol/exporter/loki/internal/convert/convert_test.go b/internal/component/otelcol/exporter/loki/internal/convert/convert_test.go index b7e8fbec10..0bfa428de2 100644 --- a/internal/component/otelcol/exporter/loki/internal/convert/convert_test.go +++ b/internal/component/otelcol/exporter/loki/internal/convert/convert_test.go @@ -312,7 +312,7 @@ func TestConsumeLogs(t *testing.T) { for _, tc := range tests { t.Run(tc.testName, func(t *testing.T) { - logger := util.TestFlowLogger(t) + logger := util.TestAlloyLogger(t) promReg := prometheus.NewRegistry() receiver := loki.NewLogsReceiverWithChannel(make(chan loki.Entry, maxTestedLogEntries)) diff --git a/internal/component/otelcol/extension/extension.go b/internal/component/otelcol/extension/extension.go index a832e7cc32..f0ba0fb249 100644 --- a/internal/component/otelcol/extension/extension.go +++ b/internal/component/otelcol/extension/extension.go @@ -1,7 +1,7 @@ -// Package extension provides utilities to create a Flow component from +// Package extension provides utilities to create an Alloy component from // OpenTelemetry Collector extensions. // -// Other OpenTelemetry Collector extensions are better served as generic Flow +// Other OpenTelemetry Collector extensions are better served as generic Alloy // components rather than being placed in the otelcol namespace. package extension @@ -39,8 +39,8 @@ type Arguments interface { Exporters() map[otelcomponent.DataType]map[otelcomponent.ID]otelcomponent.Component } -// Extension is a Flow component shim which manages an OpenTelemetry Collector -// extension. +// Extension is an Alloy component shim which manages an OpenTelemetry +// Collector extension. type Extension struct { ctx context.Context cancel context.CancelFunc @@ -57,9 +57,9 @@ var ( _ component.HealthComponent = (*Extension)(nil) ) -// New creates a new Flow component which encapsulates an OpenTelemetry +// New creates a new Alloy component which encapsulates an OpenTelemetry // Collector extension. 
args must hold a value of the argument -// type registered with the Flow component. +// type registered with the Alloy component. func New(opts component.Options, f otelextension.Factory, args Arguments) (*Extension, error) { ctx, cancel := context.WithCancel(context.Background()) diff --git a/internal/component/otelcol/extension/extension_test.go b/internal/component/otelcol/extension/extension_test.go index 34e8855220..20c56268b2 100644 --- a/internal/component/otelcol/extension/extension_test.go +++ b/internal/component/otelcol/extension/extension_test.go @@ -23,7 +23,7 @@ func TestExtension(t *testing.T) { } ) - // Create and start our Flow component. We then wait for it to export a + // Create and start our Alloy component. We then wait for it to export a // consumer that we can send data to. te := newTestEnvironment(t, onCreated) te.Start(fakeExtensionArgs{}) diff --git a/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/config.go b/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/config.go index 75e9071702..d064116681 100644 --- a/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/config.go +++ b/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/config.go @@ -48,8 +48,8 @@ type Source struct { // ReloadInterval determines the periodicity to refresh the strategies ReloadInterval time.Duration `mapstructure:"reload_interval"` - // Contents is a field added for the Grafana Agent that allows dynamic mapping of sampling rules - // through flow + // Contents is a field added for Alloy that allows dynamic mapping of + // sampling rules. Contents string `mapstructure:"contents"` } diff --git a/internal/component/otelcol/internal/scheduler/host.go b/internal/component/otelcol/internal/scheduler/host.go index bac33cccce..9a4d823df3 100644 --- a/internal/component/otelcol/internal/scheduler/host.go +++ b/internal/component/otelcol/internal/scheduler/host.go @@ -8,7 +8,7 @@ import ( otelextension "go.opentelemetry.io/collector/extension" ) -// Host implements otelcomponent.Host for Grafana Agent Flow. +// Host implements otelcomponent.Host for Grafana Alloy. type Host struct { log log.Logger diff --git a/internal/component/otelcol/internal/scheduler/scheduler.go b/internal/component/otelcol/internal/scheduler/scheduler.go index f2c891a17a..a6178e488f 100644 --- a/internal/component/otelcol/internal/scheduler/scheduler.go +++ b/internal/component/otelcol/internal/scheduler/scheduler.go @@ -16,8 +16,8 @@ import ( ) // Scheduler implements manages a set of OpenTelemetry Collector components. -// Scheduler is intended to be used from Flow components which need to schedule -// OpenTelemetry Collector components; it does not implement the full +// Scheduler is intended to be used from Alloy components which need to +// schedule OpenTelemetry Collector components; it does not implement the full // component.Component interface. 
// // Each OpenTelemetry Collector component has one instance per supported diff --git a/internal/component/otelcol/processor/processor.go b/internal/component/otelcol/processor/processor.go index 449b09dca8..528e56fad4 100644 --- a/internal/component/otelcol/processor/processor.go +++ b/internal/component/otelcol/processor/processor.go @@ -1,4 +1,4 @@ -// Package processor exposes utilities to create a Flow component from +// Package processor exposes utilities to create an Alloy component from // OpenTelemetry Collector processors. package processor @@ -44,8 +44,8 @@ type Arguments interface { NextConsumers() *otelcol.ConsumerArguments } -// Processor is a Flow component shim which manages an OpenTelemetry Collector -// processor component. +// Processor is an Alloy component shim which manages an OpenTelemetry +// Collector processor component. type Processor struct { ctx context.Context cancel context.CancelFunc @@ -63,9 +63,9 @@ var ( _ component.HealthComponent = (*Processor)(nil) ) -// New creates a new Flow component which encapsulates an OpenTelemetry +// New creates a new Alloy component which encapsulates an OpenTelemetry // Collector processor. args must hold a value of the argument type registered -// with the Flow component. +// with the Alloy component. // // The registered component must be registered to export the // otelcol.ConsumerExports type, otherwise New will panic. diff --git a/internal/component/otelcol/processor/processor_test.go b/internal/component/otelcol/processor/processor_test.go index e6aee3974f..d76598fc6c 100644 --- a/internal/component/otelcol/processor/processor_test.go +++ b/internal/component/otelcol/processor/processor_test.go @@ -24,7 +24,7 @@ func TestProcessor(t *testing.T) { ctx := componenttest.TestContext(t) // Create an instance of a fake OpenTelemetry Collector processor which our - // Flow component will wrap around. Our fake processor will immediately + // Alloy component will wrap around. Our fake processor will immediately // forward data to the connected consumer once one is made available to it. var ( consumer otelconsumer.Traces @@ -53,7 +53,7 @@ func TestProcessor(t *testing.T) { } ) - // Create and start our Flow component. We then wait for it to export a + // Create and start our Alloy component. We then wait for it to export a // consumer that we can send data to. te := newTestEnvironment(t, innerProcessor, onTracesConsumer) te.Start(fakeProcessorArgs{ diff --git a/internal/component/otelcol/processor/resourcedetection/internal/consul/config.go b/internal/component/otelcol/processor/resourcedetection/internal/consul/config.go index c0e4b0d00a..7c7f1320b4 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/consul/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/consul/config.go @@ -26,7 +26,7 @@ type Config struct { Token alloytypes.Secret `alloy:"token,attr,optional"` // TokenFile is not necessary in River because users can use the local.file - // Flow component instead. + // Alloy component instead. 
// // TokenFile string `alloy:"token_file"` diff --git a/internal/component/otelcol/processor/span/span_test.go b/internal/component/otelcol/processor/span/span_test.go index aec4b2d240..3c10842f1b 100644 --- a/internal/component/otelcol/processor/span/span_test.go +++ b/internal/component/otelcol/processor/span/span_test.go @@ -16,12 +16,12 @@ import ( func TestArguments_UnmarshalRiver(t *testing.T) { tests := []struct { - flowCfg string + alloyCfg string otelCfg map[string]interface{} expectUnmarshalError bool }{ { - flowCfg: ` + alloyCfg: ` name { separator = "::" from_attributes = ["db.svc", "operation", "id"] @@ -37,7 +37,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { }, }, { - flowCfg: ` + alloyCfg: ` name { from_attributes = ["db.svc", "operation", "id"] } @@ -51,7 +51,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { }, }, { - flowCfg: ` + alloyCfg: ` name { to_attributes { rules = ["^\\/api\\/v1\\/document\\/(?P.*)\\/update$"] @@ -69,7 +69,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { }, }, { - flowCfg: ` + alloyCfg: ` include { match_type = "regexp" services = ["banks"] @@ -105,7 +105,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { }, }, { - flowCfg: ` + alloyCfg: ` status { code = "Error" description = "some additional error description" @@ -121,7 +121,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { }, }, { - flowCfg: ` + alloyCfg: ` include { match_type = "strict" attribute { @@ -154,7 +154,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { for _, tc := range tests { var args span.Arguments - err := syntax.Unmarshal([]byte(tc.flowCfg), &args) + err := syntax.Unmarshal([]byte(tc.alloyCfg), &args) if tc.expectUnmarshalError { require.Error(t, err) diff --git a/internal/component/otelcol/receiver/prometheus/prometheus_test.go b/internal/component/otelcol/receiver/prometheus/prometheus_test.go index a7a5acc98e..d4cf331c80 100644 --- a/internal/component/otelcol/receiver/prometheus/prometheus_test.go +++ b/internal/component/otelcol/receiver/prometheus/prometheus_test.go @@ -9,7 +9,7 @@ import ( "github.com/grafana/alloy/internal/component/otelcol" "github.com/grafana/alloy/internal/component/otelcol/internal/fakeconsumer" "github.com/grafana/alloy/internal/component/otelcol/receiver/prometheus" - flowprometheus "github.com/grafana/alloy/internal/component/prometheus" + alloyprometheus "github.com/grafana/alloy/internal/component/prometheus" "github.com/grafana/alloy/internal/util" "github.com/grafana/alloy/syntax" "github.com/prometheus/common/model" @@ -80,7 +80,7 @@ func Test(t *testing.T) { } ctx := context.Background() - ctx = scrape.ContextWithMetricMetadataStore(ctx, flowprometheus.NoopMetadataStore{}) + ctx = scrape.ContextWithMetricMetadataStore(ctx, alloyprometheus.NoopMetadataStore{}) ctx = scrape.ContextWithTarget(ctx, &scrape.Target{}) app := exports.Receiver.Appender(ctx) _, err := app.Append(0, l, ts, v) diff --git a/internal/component/otelcol/receiver/receiver.go b/internal/component/otelcol/receiver/receiver.go index 2591b5f4e4..d42754943a 100644 --- a/internal/component/otelcol/receiver/receiver.go +++ b/internal/component/otelcol/receiver/receiver.go @@ -1,4 +1,4 @@ -// Package receiver utilities to create a Flow component from OpenTelemetry +// Package receiver utilities to create an Alloy component from OpenTelemetry // Collector receivers. 
package receiver @@ -47,7 +47,7 @@ type Arguments interface { DebugMetricsConfig() otelcol.DebugMetricsArguments } -// Receiver is a Flow component shim which manages an OpenTelemetry Collector +// Receiver is an Alloy component shim which manages an OpenTelemetry Collector // receiver component. type Receiver struct { ctx context.Context @@ -65,11 +65,11 @@ var ( _ component.HealthComponent = (*Receiver)(nil) ) -// New creates a new Flow component which encapsulates an OpenTelemetry +// New creates a new Alloy component which encapsulates an OpenTelemetry // Collector receiver. args must hold a value of the argument type registered -// with the Flow component. +// with the Alloy component. // -// If the registered Flow component registers exported fields, it is the +// If the registered Alloy component registers exported fields, it is the // responsibility of the caller to export values when needed; the Receiver // component never exports any values. func New(opts component.Options, f otelreceiver.Factory, args Arguments) (*Receiver, error) { diff --git a/internal/component/otelcol/receiver/receiver_test.go b/internal/component/otelcol/receiver/receiver_test.go index beb21dcf59..3b4f72d4e1 100644 --- a/internal/component/otelcol/receiver/receiver_test.go +++ b/internal/component/otelcol/receiver/receiver_test.go @@ -38,7 +38,7 @@ func TestReceiver(t *testing.T) { } ) - // Create and start our Flow component. We then wait for it to export a + // Create and start our Alloy component. We then wait for it to export a // consumer that we can send data to. te := newTestEnvironment(t, onTracesConsumer) te.Start(fakeReceiverArgs{ diff --git a/internal/component/prometheus/exporter/mysql/mysql_test.go b/internal/component/prometheus/exporter/mysql/mysql_test.go index 4018d30511..3e61ccbd99 100644 --- a/internal/component/prometheus/exporter/mysql/mysql_test.go +++ b/internal/component/prometheus/exporter/mysql/mysql_test.go @@ -152,7 +152,7 @@ func TestRiverConfigConvert(t *testing.T) { require.False(t, c.MySQLUserPrivileges) } -// Checks that the flow and static default configs have not drifted +// Checks that the configs have not drifted between Grafana Agent static mode and Alloy. func TestDefaultsSame(t *testing.T) { convertedDefaults := DefaultArguments.Convert() require.Equal(t, mysqld_exporter.DefaultConfig, *convertedDefaults) diff --git a/internal/component/prometheus/exporter/process/process_test.go b/internal/component/prometheus/exporter/process/process_test.go index a7da8afc03..03f8bca71e 100644 --- a/internal/component/prometheus/exporter/process/process_test.go +++ b/internal/component/prometheus/exporter/process/process_test.go @@ -11,8 +11,8 @@ import ( func TestRiverConfigUnmarshal(t *testing.T) { var exampleRiverConfig = ` matcher { - name = "flow" - comm = ["grafana-agent"] + name = "alloy" + comm = ["alloy"] cmdline = ["*run*"] } track_children = false @@ -32,8 +32,8 @@ func TestRiverConfigUnmarshal(t *testing.T) { expected := []MatcherGroup{ { - Name: "flow", - CommRules: []string{"grafana-agent"}, + Name: "alloy", + CommRules: []string{"alloy"}, CmdlineRules: []string{"*run*"}, }, } diff --git a/internal/component/prometheus/fanout.go b/internal/component/prometheus/fanout.go index 98886cb787..323df69fcc 100644 --- a/internal/component/prometheus/fanout.go +++ b/internal/component/prometheus/fanout.go @@ -18,7 +18,7 @@ import ( var _ storage.Appendable = (*Fanout)(nil) -// Fanout supports the default Flow style of appendables since it can go to multiple outputs. 
It also allows the intercepting of appends. +// Fanout supports the default Alloy style of appendables since it can go to multiple outputs. It also allows the intercepting of appends. type Fanout struct { mut sync.RWMutex // children is where to fan out. @@ -69,7 +69,7 @@ func (f *Fanout) Appender(ctx context.Context) storage.Appender { // code from the prometheusreceiver which expects the Appender context to // be contain both a scrape target and a metadata store, and fails the // conversion if they are missing. We should find a way around this as both - // Targets and Metadata will be handled in a different way in Flow. + // Targets and Metadata will be handled in a different way in Alloy. ctx = scrape.ContextWithTarget(ctx, &scrape.Target{}) ctx = scrape.ContextWithMetricMetadataStore(ctx, NoopMetadataStore{}) diff --git a/internal/component/prometheus/operator/common/crdmanager.go b/internal/component/prometheus/operator/common/crdmanager.go index 9a08192f39..dd0d946adb 100644 --- a/internal/component/prometheus/operator/common/crdmanager.go +++ b/internal/component/prometheus/operator/common/crdmanager.go @@ -115,9 +115,9 @@ func (c *crdManager) Run(ctx context.Context) error { }() // Start prometheus scrape manager. - flowAppendable := prometheus.NewFanout(c.args.ForwardTo, c.opts.ID, c.opts.Registerer, c.ls) + alloyAppendable := prometheus.NewFanout(c.args.ForwardTo, c.opts.ID, c.opts.Registerer, c.ls) opts := &scrape.Options{} - c.scrapeManager = scrape.NewManager(opts, c.logger, flowAppendable) + c.scrapeManager = scrape.NewManager(opts, c.logger, alloyAppendable) defer c.scrapeManager.Stop() targetSetsChan := make(chan map[string][]*targetgroup.Group) go func() { diff --git a/internal/component/prometheus/operator/configgen/config_gen.go b/internal/component/prometheus/operator/configgen/config_gen.go index 53f7bf5ed6..6630e7c7de 100644 --- a/internal/component/prometheus/operator/configgen/config_gen.go +++ b/internal/component/prometheus/operator/configgen/config_gen.go @@ -6,7 +6,7 @@ import ( "regexp" k8sConfig "github.com/grafana/alloy/internal/component/common/kubernetes" - flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + alloy_relabel "github.com/grafana/alloy/internal/component/common/relabel" "github.com/grafana/alloy/internal/component/prometheus/operator" promopv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" commonConfig "github.com/prometheus/common/config" @@ -19,7 +19,7 @@ import ( type ConfigGenerator struct { Client *k8sConfig.ClientArguments Secrets SecretFetcher - AdditionalRelabelConfigs []*flow_relabel.Config + AdditionalRelabelConfigs []*alloy_relabel.Config ScrapeOptions operator.ScrapeOptions } @@ -235,7 +235,7 @@ func (cg *ConfigGenerator) initRelabelings() relabeler { r := relabeler{} // first add any relabelings from the component config if len(cg.AdditionalRelabelConfigs) > 0 { - for _, c := range flow_relabel.ComponentToPromRelabelConfigs(cg.AdditionalRelabelConfigs) { + for _, c := range alloy_relabel.ComponentToPromRelabelConfigs(cg.AdditionalRelabelConfigs) { r.add(c) } } diff --git a/internal/component/prometheus/operator/configgen/config_gen_podmonitor_test.go b/internal/component/prometheus/operator/configgen/config_gen_podmonitor_test.go index 7f9a6b68f5..b9815519f5 100644 --- a/internal/component/prometheus/operator/configgen/config_gen_podmonitor_test.go +++ b/internal/component/prometheus/operator/configgen/config_gen_podmonitor_test.go @@ -8,7 +8,7 @@ import ( "time" 
"github.com/grafana/alloy/internal/component/common/kubernetes" - flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + alloy_relabel "github.com/grafana/alloy/internal/component/common/relabel" "github.com/grafana/alloy/internal/component/prometheus/operator" "github.com/grafana/alloy/internal/util" promopv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" @@ -422,7 +422,7 @@ func TestGeneratePodMonitorConfig(t *testing.T) { t.Run(tc.name, func(t *testing.T) { cg := &ConfigGenerator{ Client: &kubernetes.ClientArguments{}, - AdditionalRelabelConfigs: []*flow_relabel.Config{ + AdditionalRelabelConfigs: []*alloy_relabel.Config{ {TargetLabel: "__meta_foo", Replacement: "bar"}, }, ScrapeOptions: operator.ScrapeOptions{ diff --git a/internal/component/prometheus/operator/configgen/config_gen_probe_test.go b/internal/component/prometheus/operator/configgen/config_gen_probe_test.go index 41d59321aa..d4e2a2afca 100644 --- a/internal/component/prometheus/operator/configgen/config_gen_probe_test.go +++ b/internal/component/prometheus/operator/configgen/config_gen_probe_test.go @@ -8,7 +8,7 @@ import ( "time" "github.com/grafana/alloy/internal/component/common/kubernetes" - flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + alloy_relabel "github.com/grafana/alloy/internal/component/common/relabel" "github.com/grafana/alloy/internal/util" promopv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" commonConfig "github.com/prometheus/common/config" @@ -188,7 +188,7 @@ func TestGenerateProbeConfig(t *testing.T) { t.Run(tc.name, func(t *testing.T) { cg := &ConfigGenerator{ Client: &kubernetes.ClientArguments{}, - AdditionalRelabelConfigs: []*flow_relabel.Config{ + AdditionalRelabelConfigs: []*alloy_relabel.Config{ {TargetLabel: "__meta_foo", Replacement: "bar"}, }, } diff --git a/internal/component/prometheus/operator/configgen/config_gen_servicemonitor_test.go b/internal/component/prometheus/operator/configgen/config_gen_servicemonitor_test.go index 1e77469bf1..c20d5178b4 100644 --- a/internal/component/prometheus/operator/configgen/config_gen_servicemonitor_test.go +++ b/internal/component/prometheus/operator/configgen/config_gen_servicemonitor_test.go @@ -8,7 +8,7 @@ import ( "time" "github.com/grafana/alloy/internal/component/common/kubernetes" - flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + alloy_relabel "github.com/grafana/alloy/internal/component/common/relabel" "github.com/grafana/alloy/internal/util" promopv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" commonConfig "github.com/prometheus/common/config" @@ -414,7 +414,7 @@ func TestGenerateServiceMonitorConfig(t *testing.T) { t.Run(tc.name, func(t *testing.T) { cg := &ConfigGenerator{ Client: &kubernetes.ClientArguments{}, - AdditionalRelabelConfigs: []*flow_relabel.Config{ + AdditionalRelabelConfigs: []*alloy_relabel.Config{ {TargetLabel: "__meta_foo", Replacement: "bar"}, }, } diff --git a/internal/component/prometheus/operator/configgen/config_gen_test.go b/internal/component/prometheus/operator/configgen/config_gen_test.go index fad7bfcc83..c64f405428 100644 --- a/internal/component/prometheus/operator/configgen/config_gen_test.go +++ b/internal/component/prometheus/operator/configgen/config_gen_test.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/alloy/internal/component/common/config" "github.com/grafana/alloy/internal/component/common/kubernetes" - flow_relabel 
"github.com/grafana/alloy/internal/component/common/relabel" + alloy_relabel "github.com/grafana/alloy/internal/component/common/relabel" "github.com/grafana/alloy/internal/component/prometheus/operator" promopv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" promConfig "github.com/prometheus/common/config" @@ -22,7 +22,7 @@ import ( var ( configGen = &ConfigGenerator{ Secrets: &fakeSecrets{}, - AdditionalRelabelConfigs: []*flow_relabel.Config{ + AdditionalRelabelConfigs: []*alloy_relabel.Config{ {TargetLabel: "__meta_foo", Replacement: "bar"}, }, } diff --git a/internal/component/prometheus/operator/types.go b/internal/component/prometheus/operator/types.go index 14c42410f6..512a5ec72d 100644 --- a/internal/component/prometheus/operator/types.go +++ b/internal/component/prometheus/operator/types.go @@ -5,7 +5,7 @@ import ( "github.com/grafana/alloy/internal/component/common/config" "github.com/grafana/alloy/internal/component/common/kubernetes" - flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + alloy_relabel "github.com/grafana/alloy/internal/component/common/relabel" "github.com/grafana/alloy/internal/component/prometheus/scrape" "github.com/grafana/alloy/internal/service/cluster" "github.com/prometheus/common/model" @@ -29,7 +29,7 @@ type Arguments struct { Clustering cluster.ComponentBlock `alloy:"clustering,block,optional"` - RelabelConfigs []*flow_relabel.Config `alloy:"rule,block,optional"` + RelabelConfigs []*alloy_relabel.Config `alloy:"rule,block,optional"` Scrape ScrapeOptions `alloy:"scrape,block,optional"` } diff --git a/internal/component/prometheus/receive_http/receive_http_test.go b/internal/component/prometheus/receive_http/receive_http_test.go index 37a1dd2e37..9ce3eee33d 100644 --- a/internal/component/prometheus/receive_http/receive_http_test.go +++ b/internal/component/prometheus/receive_http/receive_http_test.go @@ -382,7 +382,7 @@ func request(ctx context.Context, rawRemoteWriteURL string, req *prompb.WriteReq func testOptions(t *testing.T) component.Options { return component.Options{ ID: "prometheus.receive_http.test", - Logger: util.TestFlowLogger(t), + Logger: util.TestAlloyLogger(t), Registerer: prometheus.NewRegistry(), GetServiceData: func(name string) (interface{}, error) { return labelstore.New(nil, prometheus.DefaultRegisterer), nil diff --git a/internal/component/prometheus/relabel/relabel.go b/internal/component/prometheus/relabel/relabel.go index 9db46df950..e78596212e 100644 --- a/internal/component/prometheus/relabel/relabel.go +++ b/internal/component/prometheus/relabel/relabel.go @@ -6,7 +6,7 @@ import ( "sync" "github.com/grafana/alloy/internal/component" - flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + alloy_relabel "github.com/grafana/alloy/internal/component/common/relabel" "github.com/grafana/alloy/internal/component/prometheus" "github.com/grafana/alloy/internal/featuregate" "github.com/grafana/alloy/internal/service/labelstore" @@ -42,7 +42,7 @@ type Arguments struct { ForwardTo []storage.Appendable `alloy:"forward_to,attr"` // The relabelling rules to apply to each metric before it's forwarded. - MetricRelabelConfigs []*flow_relabel.Config `alloy:"rule,block,optional"` + MetricRelabelConfigs []*alloy_relabel.Config `alloy:"rule,block,optional"` // Cache size to use for LRU cache. 
CacheSize int `alloy:"max_cache_size,attr,optional"` @@ -65,8 +65,8 @@ func (arg *Arguments) Validate() error { // Exports holds values which are exported by the prometheus.relabel component. type Exports struct { - Receiver storage.Appendable `alloy:"receiver,attr"` - Rules flow_relabel.Rules `alloy:"rules,attr"` + Receiver storage.Appendable `alloy:"receiver,attr"` + Rules alloy_relabel.Rules `alloy:"rules,attr"` } // Component implements the prometheus.relabel component. @@ -218,7 +218,7 @@ func (c *Component) Update(args component.Arguments) error { newArgs := args.(Arguments) c.clearCache(newArgs.CacheSize) - c.mrc = flow_relabel.ComponentToPromRelabelConfigs(newArgs.MetricRelabelConfigs) + c.mrc = alloy_relabel.ComponentToPromRelabelConfigs(newArgs.MetricRelabelConfigs) c.fanout.UpdateChildren(newArgs.ForwardTo) c.opts.OnStateChange(Exports{Receiver: c.receiver, Rules: newArgs.MetricRelabelConfigs}) diff --git a/internal/component/prometheus/relabel/relabel_test.go b/internal/component/prometheus/relabel/relabel_test.go index ff7fded923..9ae7629b1f 100644 --- a/internal/component/prometheus/relabel/relabel_test.go +++ b/internal/component/prometheus/relabel/relabel_test.go @@ -10,7 +10,7 @@ import ( "github.com/grafana/alloy/internal/alloy/componenttest" "github.com/grafana/alloy/internal/component" - flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + alloy_relabel "github.com/grafana/alloy/internal/component/common/relabel" "github.com/grafana/alloy/internal/component/prometheus" "github.com/grafana/alloy/internal/service/labelstore" "github.com/grafana/alloy/internal/util" @@ -45,7 +45,7 @@ func TestUpdateReset(t *testing.T) { require.True(t, relabeller.cache.Len() == 1) _ = relabeller.Update(Arguments{ CacheSize: 100000, - MetricRelabelConfigs: []*flow_relabel.Config{}, + MetricRelabelConfigs: []*alloy_relabel.Config{}, }) require.True(t, relabeller.cache.Len() == 0) } @@ -68,7 +68,7 @@ func TestNil(t *testing.T) { })) relabeller, err := New(component.Options{ ID: "1", - Logger: util.TestFlowLogger(t), + Logger: util.TestAlloyLogger(t), OnStateChange: func(e component.Exports) {}, Registerer: prom.NewRegistry(), GetServiceData: func(name string) (interface{}, error) { @@ -76,10 +76,10 @@ func TestNil(t *testing.T) { }, }, Arguments{ ForwardTo: []storage.Appendable{fanout}, - MetricRelabelConfigs: []*flow_relabel.Config{ + MetricRelabelConfigs: []*alloy_relabel.Config{ { SourceLabels: []string{"__address__"}, - Regex: flow_relabel.Regexp(relabel.MustNewRegexp("(.+)")), + Regex: alloy_relabel.Regexp(relabel.MustNewRegexp("(.+)")), Action: "drop", }, }, @@ -120,7 +120,7 @@ func BenchmarkCache(b *testing.B) { var entry storage.Appendable _, _ = New(component.Options{ ID: "1", - Logger: util.TestFlowLogger(b), + Logger: util.TestAlloyLogger(b), OnStateChange: func(e component.Exports) { newE := e.(Exports) entry = newE.Receiver @@ -128,10 +128,10 @@ func BenchmarkCache(b *testing.B) { Registerer: prom.NewRegistry(), }, Arguments{ ForwardTo: []storage.Appendable{fanout}, - MetricRelabelConfigs: []*flow_relabel.Config{ + MetricRelabelConfigs: []*alloy_relabel.Config{ { SourceLabels: []string{"__address__"}, - Regex: flow_relabel.Regexp(relabel.MustNewRegexp("(.+)")), + Regex: alloy_relabel.Regexp(relabel.MustNewRegexp("(.+)")), TargetLabel: "new_label", Replacement: "new_value", Action: "replace", @@ -155,7 +155,7 @@ func generateRelabel(t *testing.T) *Component { })) relabeller, err := New(component.Options{ ID: "1", - Logger: util.TestFlowLogger(t), + Logger: 
util.TestAlloyLogger(t), OnStateChange: func(e component.Exports) {}, Registerer: prom.NewRegistry(), GetServiceData: func(name string) (interface{}, error) { @@ -163,10 +163,10 @@ func generateRelabel(t *testing.T) *Component { }, }, Arguments{ ForwardTo: []storage.Appendable{fanout}, - MetricRelabelConfigs: []*flow_relabel.Config{ + MetricRelabelConfigs: []*alloy_relabel.Config{ { SourceLabels: []string{"__address__"}, - Regex: flow_relabel.Regexp(relabel.MustNewRegexp("(.+)")), + Regex: alloy_relabel.Regexp(relabel.MustNewRegexp("(.+)")), TargetLabel: "new_label", Replacement: "new_value", Action: "replace", @@ -220,8 +220,8 @@ func TestRuleGetter(t *testing.T) { require.Len(t, gotOriginal, 1) require.Len(t, gotUpdated, 1) - require.Equal(t, gotOriginal[0].Action, flow_relabel.Keep) - require.Equal(t, gotUpdated[0].Action, flow_relabel.Drop) + require.Equal(t, gotOriginal[0].Action, alloy_relabel.Keep) + require.Equal(t, gotUpdated[0].Action, alloy_relabel.Drop) require.Equal(t, gotUpdated[0].SourceLabels, gotOriginal[0].SourceLabels) require.Equal(t, gotUpdated[0].Regex, gotOriginal[0].Regex) } diff --git a/internal/component/prometheus/remotewrite/remote_write.go b/internal/component/prometheus/remotewrite/remote_write.go index 0bc327d4ad..3f22ff38c4 100644 --- a/internal/component/prometheus/remotewrite/remote_write.go +++ b/internal/component/prometheus/remotewrite/remote_write.go @@ -105,7 +105,7 @@ func New(o component.Options, c Arguments) (*Component, error) { // they are responsible for generating ref IDs. This means two // remote_writes may return the same ref ID for two different series. We // treat the remote_write ID as a "local ID" and translate it to a "global - // ID" to ensure Flow compatibility. + // ID" to ensure Alloy compatibility. 
prometheus.WithAppendHook(func(globalRef storage.SeriesRef, l labels.Labels, t int64, v float64, next storage.Appender) (storage.SeriesRef, error) { if res.exited.Load() { diff --git a/internal/component/prometheus/remotewrite/types.go b/internal/component/prometheus/remotewrite/types.go index 6d365270a7..8ad675fc1f 100644 --- a/internal/component/prometheus/remotewrite/types.go +++ b/internal/component/prometheus/remotewrite/types.go @@ -7,7 +7,7 @@ import ( "time" types "github.com/grafana/alloy/internal/component/common/config" - flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + alloy_relabel "github.com/grafana/alloy/internal/component/common/relabel" "github.com/grafana/alloy/syntax/alloytypes" "github.com/google/uuid" @@ -76,7 +76,7 @@ type EndpointOptions struct { HTTPClientConfig *types.HTTPClientConfig `alloy:",squash"` QueueOptions *QueueOptions `alloy:"queue_config,block,optional"` MetadataOptions *MetadataOptions `alloy:"metadata_config,block,optional"` - WriteRelabelConfigs []*flow_relabel.Config `alloy:"write_relabel_config,block,optional"` + WriteRelabelConfigs []*alloy_relabel.Config `alloy:"write_relabel_config,block,optional"` SigV4 *SigV4Config `alloy:"sigv4,block,optional"` AzureAD *AzureADConfig `alloy:"azuread,block,optional"` } @@ -242,7 +242,7 @@ func convertConfigs(cfg Arguments) (*config.Config, error) { SendExemplars: rw.SendExemplars, SendNativeHistograms: rw.SendNativeHistograms, - WriteRelabelConfigs: flow_relabel.ComponentToPromRelabelConfigs(rw.WriteRelabelConfigs), + WriteRelabelConfigs: alloy_relabel.ComponentToPromRelabelConfigs(rw.WriteRelabelConfigs), HTTPClientConfig: *rw.HTTPClientConfig.Convert(), QueueConfig: rw.QueueOptions.toPrometheusType(), MetadataConfig: rw.MetadataOptions.toPrometheusType(), diff --git a/internal/component/prometheus/scrape/scrape.go b/internal/component/prometheus/scrape/scrape.go index d496a881c0..3a3554d95c 100644 --- a/internal/component/prometheus/scrape/scrape.go +++ b/internal/component/prometheus/scrape/scrape.go @@ -157,7 +157,7 @@ func New(o component.Options, args Arguments) (*Component, error) { } ls := service.(labelstore.LabelStore) - flowAppendable := prometheus.NewFanout(args.ForwardTo, o.ID, o.Registerer, ls) + alloyAppendable := prometheus.NewFanout(args.ForwardTo, o.ID, o.Registerer, ls) scrapeOptions := &scrape.Options{ ExtraMetrics: args.ExtraMetrics, HTTPClientOptions: []config_util.HTTPClientOption{ @@ -165,7 +165,7 @@ func New(o component.Options, args Arguments) (*Component, error) { }, EnableProtobufNegotiation: args.EnableProtobufNegotiation, } - scraper := scrape.NewManager(scrapeOptions, o.Logger, flowAppendable) + scraper := scrape.NewManager(scrapeOptions, o.Logger, alloyAppendable) targetsGauge := client_prometheus.NewGauge(client_prometheus.GaugeOpts{ Name: "agent_prometheus_scrape_targets_gauge", @@ -180,7 +180,7 @@ func New(o component.Options, args Arguments) (*Component, error) { cluster: clusterData, reloadTargets: make(chan struct{}, 1), scraper: scraper, - appendable: flowAppendable, + appendable: alloyAppendable, targetsGauge: targetsGauge, } @@ -319,9 +319,9 @@ func (c *Component) distTargets( // NOTE(@tpaschalis) First approach, manually building the // 'clustered' targets implementation every time. 
dt := discovery.NewDistributedTargets(clustering, c.cluster, targets) - flowTargets := dt.Get() - c.targetsGauge.Set(float64(len(flowTargets))) - promTargets := c.componentTargetsToProm(jobName, flowTargets) + alloyTargets := dt.Get() + c.targetsGauge.Set(float64(len(alloyTargets))) + promTargets := c.componentTargetsToProm(jobName, alloyTargets) return promTargets } diff --git a/internal/component/prometheus/scrape/scrape_test.go b/internal/component/prometheus/scrape/scrape_test.go index c700be4a3d..03c3081d5d 100644 --- a/internal/component/prometheus/scrape/scrape_test.go +++ b/internal/component/prometheus/scrape/scrape_test.go @@ -73,7 +73,7 @@ func TestBadRiverConfig(t *testing.T) { func TestForwardingToAppendable(t *testing.T) { opts := component.Options{ - Logger: util.TestFlowLogger(t), + Logger: util.TestAlloyLogger(t), Registerer: prometheus_client.NewRegistry(), GetServiceData: func(name string) (interface{}, error) { switch name { @@ -177,7 +177,7 @@ func TestCustomDialer(t *testing.T) { require.NoError(t, err) opts := component.Options{ - Logger: util.TestFlowLogger(t), + Logger: util.TestAlloyLogger(t), Registerer: prometheus_client.NewRegistry(), GetServiceData: func(name string) (interface{}, error) { switch name { diff --git a/internal/component/pyroscope/appender.go b/internal/component/pyroscope/appender.go index 3f231adee2..41c2cca164 100644 --- a/internal/component/pyroscope/appender.go +++ b/internal/component/pyroscope/appender.go @@ -31,7 +31,7 @@ type RawSample struct { var _ Appendable = (*Fanout)(nil) -// Fanout supports the default Flow style of appendables since it can go to multiple outputs. It also allows the intercepting of appends. +// Fanout supports the default Alloy style of appendables since it can go to multiple outputs. It also allows the intercepting of appends. type Fanout struct { mut sync.RWMutex // children is where to fan out. 
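The prometheus.scrape and prometheus.operator changes above only rename the local variable (`flowAppendable` → `alloyAppendable`); the wiring itself is unchanged: forwarded appendables are wrapped in a `Fanout`, which is then handed to the Prometheus scrape manager as its `storage.Appendable`. A minimal sketch of that wiring, mirroring the calls in this diff and assuming this module's vendored Prometheus and internal packages:

```go
package main

import (
	"github.com/go-kit/log"
	alloyprometheus "github.com/grafana/alloy/internal/component/prometheus"
	"github.com/grafana/alloy/internal/service/labelstore"
	client_prometheus "github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/prometheus/scrape"
	"github.com/prometheus/prometheus/storage"
)

func main() {
	reg := client_prometheus.NewRegistry()
	ls := labelstore.New(nil, reg)

	// Appendables exported by downstream components (prometheus.remote_write, etc.).
	var forwardTo []storage.Appendable

	alloyAppendable := alloyprometheus.NewFanout(forwardTo, "prometheus.scrape.example", reg, ls)
	scraper := scrape.NewManager(&scrape.Options{}, log.NewNopLogger(), alloyAppendable)
	_ = scraper
}
```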
diff --git a/internal/component/pyroscope/ebpf/ebpf_linux.go b/internal/component/pyroscope/ebpf/ebpf_linux.go index bb7b6d890a..3cc4b75b9c 100644 --- a/internal/component/pyroscope/ebpf/ebpf_linux.go +++ b/internal/component/pyroscope/ebpf/ebpf_linux.go @@ -51,12 +51,12 @@ func New(opts component.Options, args Arguments) (component.Component, error) { return nil, fmt.Errorf("ebpf session create: %w", err) } - flowAppendable := pyroscope.NewFanout(args.ForwardTo, opts.ID, opts.Registerer) + alloyAppendable := pyroscope.NewFanout(args.ForwardTo, opts.ID, opts.Registerer) res := &Component{ options: opts, metrics: ms, - appendable: flowAppendable, + appendable: alloyAppendable, args: args, targetFinder: targetFinder, session: session, diff --git a/internal/component/pyroscope/ebpf/ebpf_linux_test.go b/internal/component/pyroscope/ebpf/ebpf_linux_test.go index 97baf8f8f6..2940bb205d 100644 --- a/internal/component/pyroscope/ebpf/ebpf_linux_test.go +++ b/internal/component/pyroscope/ebpf/ebpf_linux_test.go @@ -71,7 +71,7 @@ func (m *mockSession) DebugInfo() interface{} { } func TestShutdownOnError(t *testing.T) { - logger := util.TestFlowLogger(t) + logger := util.TestAlloyLogger(t) ms := newMetrics(nil) targetFinder, err := sd.NewTargetFinder(os.DirFS("/foo"), logger, sd.TargetsOptions{ ContainerCacheSize: 1024, @@ -98,7 +98,7 @@ func TestShutdownOnError(t *testing.T) { } func TestContextShutdown(t *testing.T) { - logger := util.TestFlowLogger(t) + logger := util.TestAlloyLogger(t) ms := newMetrics(nil) targetFinder, err := sd.NewTargetFinder(os.DirFS("/foo"), logger, sd.TargetsOptions{ ContainerCacheSize: 1024, @@ -192,11 +192,11 @@ collect_kernel_profile = false`), &arg) } func newTestComponent(opts component.Options, args Arguments, session *mockSession, targetFinder sd.TargetFinder, ms *metrics) *Component { - flowAppendable := pyroscope.NewFanout(args.ForwardTo, opts.ID, opts.Registerer) + alloyAppendable := pyroscope.NewFanout(args.ForwardTo, opts.ID, opts.Registerer) res := &Component{ options: opts, metrics: ms, - appendable: flowAppendable, + appendable: alloyAppendable, args: args, targetFinder: targetFinder, session: session, diff --git a/internal/component/pyroscope/scrape/scrape.go b/internal/component/pyroscope/scrape/scrape.go index a86bfa5689..8a45948f3a 100644 --- a/internal/component/pyroscope/scrape/scrape.go +++ b/internal/component/pyroscope/scrape/scrape.go @@ -247,14 +247,14 @@ func New(o component.Options, args Arguments) (*Component, error) { } clusterData := data.(cluster.Cluster) - flowAppendable := pyroscope.NewFanout(args.ForwardTo, o.ID, o.Registerer) - scraper := NewManager(flowAppendable, o.Logger) + alloyAppendable := pyroscope.NewFanout(args.ForwardTo, o.ID, o.Registerer) + scraper := NewManager(alloyAppendable, o.Logger) c := &Component{ opts: o, cluster: clusterData, reloadTargets: make(chan struct{}, 1), scraper: scraper, - appendable: flowAppendable, + appendable: alloyAppendable, } // Call to Update() to set the receivers and targets once at the start. 
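The profiles side follows the same shape: `pyroscope.NewFanout` takes only the forward list, component ID, and registerer (no label store), and the result feeds the profile scrape manager. The sketch below mirrors the pyroscope.ebpf and pyroscope.scrape calls above; the `[]pyroscope.Appendable` element type of the forward list is an assumption, since the diff only shows `args.ForwardTo` being passed through.

```go
package main

import (
	"github.com/go-kit/log"
	"github.com/grafana/alloy/internal/component/pyroscope"
	"github.com/grafana/alloy/internal/component/pyroscope/scrape"
	client_prometheus "github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Assumed element type; pyroscope.Fanout itself satisfies Appendable.
	var forwardTo []pyroscope.Appendable

	alloyAppendable := pyroscope.NewFanout(forwardTo, "pyroscope.scrape.example", client_prometheus.NewRegistry())
	scraper := scrape.NewManager(alloyAppendable, log.NewNopLogger())
	_ = scraper
}
```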
diff --git a/internal/component/pyroscope/scrape/scrape_test.go b/internal/component/pyroscope/scrape/scrape_test.go index 57c5c8d6bb..e08120f1c8 100644 --- a/internal/component/pyroscope/scrape/scrape_test.go +++ b/internal/component/pyroscope/scrape/scrape_test.go @@ -29,7 +29,7 @@ func TestComponent(t *testing.T) { arg := NewDefaultArguments() arg.JobName = "test" c, err := New(component.Options{ - Logger: util.TestFlowLogger(t), + Logger: util.TestAlloyLogger(t), Registerer: prometheus.NewRegistry(), OnStateChange: func(e component.Exports) {}, GetServiceData: getServiceData, @@ -209,7 +209,7 @@ func TestUpdateWhileScraping(t *testing.T) { args.ScrapeInterval = 1 * time.Second c, err := New(component.Options{ - Logger: util.TestFlowLogger(t), + Logger: util.TestAlloyLogger(t), Registerer: prometheus.NewRegistry(), OnStateChange: func(e component.Exports) {}, GetServiceData: getServiceData, diff --git a/internal/component/pyroscope/write/write_test.go b/internal/component/pyroscope/write/write_test.go index 73172bb99d..c59d49565a 100644 --- a/internal/component/pyroscope/write/write_test.go +++ b/internal/component/pyroscope/write/write_test.go @@ -44,7 +44,7 @@ func Test_Write_FanOut(t *testing.T) { func(_ context.Context, req *connect.Request[pushv1.PushRequest]) (*connect.Response[pushv1.PushResponse], error) { pushTotal.Inc() require.Equal(t, "test", req.Header()["X-Test-Header"][0]) - require.Contains(t, req.Header()["User-Agent"][0], "GrafanaAgent/") + require.Contains(t, req.Header()["User-Agent"][0], "Alloy/") require.Equal(t, []*typesv1.LabelPair{ {Name: "__name__", Value: "test"}, {Name: "foo", Value: "buzz"}, @@ -85,7 +85,7 @@ func Test_Write_FanOut(t *testing.T) { wg.Add(1) c, err := New(component.Options{ ID: "1", - Logger: util.TestFlowLogger(t), + Logger: util.TestAlloyLogger(t), Registerer: prometheus.NewRegistry(), OnStateChange: func(e component.Exports) { defer wg.Done() @@ -161,7 +161,7 @@ func Test_Write_Update(t *testing.T) { wg.Add(1) c, err := New(component.Options{ ID: "1", - Logger: util.TestFlowLogger(t), + Logger: util.TestAlloyLogger(t), Registerer: prometheus.NewRegistry(), OnStateChange: func(e component.Exports) { defer wg.Done() diff --git a/internal/component/registry.go b/internal/component/registry.go index 47695215a5..1c3c3b0a32 100644 --- a/internal/component/registry.go +++ b/internal/component/registry.go @@ -80,7 +80,7 @@ type Options struct { DataPath string // OnStateChange may be invoked at any time by a component whose Export value - // changes. The Flow controller then will queue re-processing components + // changes. The Alloy controller then will queue re-processing components // which depend on the changed component. // // OnStateChange will panic if e does not match the Exports type registered diff --git a/internal/converter/converter.go b/internal/converter/converter.go index 124a1c0513..40bd349cf8 100644 --- a/internal/converter/converter.go +++ b/internal/converter/converter.go @@ -1,5 +1,5 @@ // Package converter exposes utilities to convert config files from other -// programs to Grafana Agent Flow configurations. +// programs to Grafana Alloy configurations. package converter import ( @@ -29,8 +29,7 @@ var SupportedFormats = []string{ string(InputStatic), } -// Convert generates a Grafana Agent Flow config given an input configuration -// file. +// Convert generates a Grafana Alloy config given an input configuration file. 
// // extraArgs are supported to be passed along to a converter such as enabling // integrations-next for the static converter. Converters that do not support @@ -41,10 +40,10 @@ var SupportedFormats = []string{ // should just be the starting point rather than the final destination. // // Note that not all functionality defined in the input configuration may have -// an equivalent in Grafana Agent Flow. If the conversion could not complete -// because of mismatched functionality, an error is returned with no resulting -// config. If the conversion completed successfully but generated warnings, an -// error is returned alongside the resulting config. +// an equivalent in Grafana Alloy. If the conversion could not complete because +// of mismatched functionality, an error is returned with no resulting config. +// If the conversion completed successfully but generated warnings, an error is +// returned alongside the resulting config. func Convert(in []byte, kind Input, extraArgs []string) ([]byte, diag.Diagnostics) { switch kind { case InputPrometheus: diff --git a/internal/converter/internal/common/http_client_config.go b/internal/converter/internal/common/http_client_config.go index 8c267d2a3a..6b7473334c 100644 --- a/internal/converter/internal/common/http_client_config.go +++ b/internal/converter/internal/common/http_client_config.go @@ -28,7 +28,7 @@ func ToHttpClientConfig(httpClientConfig *prom_config.HTTPClientConfig) *config. } // ValidateHttpClientConfig returns [diag.Diagnostics] for currently -// unsupported Flow features available in Prometheus. +// unsupported Alloy features available in Prometheus. func ValidateHttpClientConfig(httpClientConfig *prom_config.HTTPClientConfig) diag.Diagnostics { var diags diag.Diagnostics diff --git a/internal/converter/internal/common/river_utils.go b/internal/converter/internal/common/river_utils.go index 81e1a38ca5..eb3734ecc1 100644 --- a/internal/converter/internal/common/river_utils.go +++ b/internal/converter/internal/common/river_utils.go @@ -12,7 +12,7 @@ import ( "github.com/grafana/alloy/syntax/scanner" "github.com/grafana/alloy/internal/component" - flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + alloy_relabel "github.com/grafana/alloy/internal/component/common/relabel" "github.com/grafana/alloy/internal/component/discovery" "github.com/grafana/alloy/internal/converter/diag" "github.com/grafana/alloy/syntax/token/builder" @@ -55,7 +55,7 @@ func getValueOverrideHook() builder.ValueOverrideHook { } } return secrets - case flow_relabel.Regexp: + case alloy_relabel.Regexp: return value.String() case []discovery.Target: return ConvertTargets{ diff --git a/internal/converter/internal/common/validate.go b/internal/converter/internal/common/validate.go index 9921ee1dcd..e4478cbce8 100644 --- a/internal/converter/internal/common/validate.go +++ b/internal/converter/internal/common/validate.go @@ -64,7 +64,7 @@ func ValidateNodes(f *builder.File) diag.Diagnostics { label += "." + n.Label } if _, ok := labels[label]; ok { - diags.Add(diag.SeverityLevelCritical, fmt.Sprintf("duplicate label after conversion %q. this is due to how valid flow labels are assembled and can be avoided by updating named properties in the source config.", label)) + diags.Add(diag.SeverityLevelCritical, fmt.Sprintf("duplicate label after conversion %q. 
this is due to how valid Alloy labels are assembled and can be avoided by updating named properties in the source config.", label)) } else { labels[label] = label } diff --git a/internal/converter/internal/common/weaveworks_server.go b/internal/converter/internal/common/weaveworks_server.go index 6730e0c55f..563c6118f4 100644 --- a/internal/converter/internal/common/weaveworks_server.go +++ b/internal/converter/internal/common/weaveworks_server.go @@ -15,7 +15,7 @@ func DefaultWeaveWorksServerCfg() server.Config { return cfg } -func WeaveWorksServerToFlowServer(config server.Config) *fnet.ServerConfig { +func WeaveworksServerToAlloyServer(config server.Config) *fnet.ServerConfig { return &fnet.ServerConfig{ HTTP: &fnet.HTTPConfig{ ListenAddress: config.HTTPListenAddress, diff --git a/internal/converter/internal/otelcolconvert/converter.go b/internal/converter/internal/otelcolconvert/converter.go index 128fb1683d..dfdde2adca 100644 --- a/internal/converter/internal/otelcolconvert/converter.go +++ b/internal/converter/internal/otelcolconvert/converter.go @@ -12,14 +12,14 @@ import ( ) // componentConverter represents a converter which converts an OpenTelemetry -// Collector component into a Flow component. +// Collector component into an Alloy component. type componentConverter interface { // Factory should return the factory for the OpenTelemetry Collector // component. Factory() component.Factory - // InputComponentName should return the name of the Flow component where - // other Flow components forward OpenTelemetry data to. + // InputComponentName should return the name of the Alloy component where + // other Alloy components forward OpenTelemetry data to. // // For example, a converter which emits a chain of components // (otelcol.receiver.prometheus -> prometheus.remote_write) should return @@ -31,14 +31,14 @@ type componentConverter interface { InputComponentName() string // ConvertAndAppend should convert the provided OpenTelemetry Collector - // component configuration into Flow configuration and append the result to + // component configuration into Alloy configuration and append the result to // [state.Body]. Implementations are expected to append configuration where // all required arguments are set and all optional arguments are set to the - // values from the input configuration or the Flow mode default. + // values from the input configuration or the Alloy default. // // ConvertAndAppend may be called more than once with the same component used - // in different pipelines. Use [state.FlowComponentLabel] to get a guaranteed - // unique Flow component label for the current state. + // in different pipelines. Use [state.AlloyComponentLabel] to get a guaranteed + // unique Alloy component label for the current state. ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics } @@ -59,7 +59,7 @@ type state struct { // converterLookup maps a converter key to the associated converter instance. converterLookup map[converterKey]componentConverter - // extensionLookup maps OTel extensions to Flow component IDs. + // extensionLookup maps OTel extensions to Alloy component IDs. extensionLookup map[component.ID]componentID componentID component.InstanceID // ID of the current component being converted. @@ -76,20 +76,20 @@ type converterKey struct { // [componentConverter] should use this to append components. 
func (state *state) Body() *builder.Body { return state.file.Body() } -// FlowComponentLabel returns the unique Flow label for the OpenTelemetry +// AlloyComponentLabel returns the unique Alloy label for the OpenTelemetry // Component component being converted. It is safe to use this label to create -// multiple Flow components in a chain. -func (state *state) FlowComponentLabel() string { - return state.flowLabelForComponent(state.componentID) +// multiple Alloy components in a chain. +func (state *state) AlloyComponentLabel() string { + return state.alloyLabelForComponent(state.componentID) } -// flowLabelForComponent returns the unique Flow label for the given +// alloyLabelForComponent returns the unique Alloy label for the given // OpenTelemetry Collector component. -func (state *state) flowLabelForComponent(c component.InstanceID) string { +func (state *state) alloyLabelForComponent(c component.InstanceID) string { const defaultLabel = "default" // We need to prove that it's possible to statelessly compute the label for a - // Flow component just by using the group name and the otelcol component name: + // Alloy component just by using the group name and the otelcol component name: // // 1. OpenTelemetry Collector components are created once per pipeline, where // the pipeline must have a unique key (a combination of telemetry type and @@ -117,7 +117,7 @@ func (state *state) flowLabelForComponent(c component.InstanceID) string { // We want to make the component label as idiomatic as possible. If both the // group and component name are empty, we'll name it "default," aligning - // with standard Flow naming conventions. + // with standard Alloy naming conventions. // // Otherwise, we'll replace empty group and component names with "default" // and concatenate them with an underscore. @@ -142,7 +142,7 @@ func (state *state) flowLabelForComponent(c component.InstanceID) string { return common.SanitizeIdentifierPanics(unsanitizedLabel) } -// Next returns the set of Flow component IDs for a given data type that the +// Next returns the set of Alloy component IDs for a given data type that the // current component being converted should forward data to. func (state *state) Next(c component.InstanceID, dataType component.DataType) []componentID { instances := state.nextInstances(c, dataType) @@ -156,7 +156,7 @@ func (state *state) Next(c component.InstanceID, dataType component.DataType) [] } // Look up the converter associated with the instance and retrieve the name - // of the Flow component expected to receive data. + // of the Alloy component expected to receive data. 
+ // of the Alloy component expected to receive data.
converter, found := state.converterLookup[key] if !found { panic(fmt.Sprintf("otelcolconvert: no component name found for converter key %v", key)) @@ -166,7 +166,7 @@ func (state *state) Next(c component.InstanceID, dataType component.DataType) [] panic(fmt.Sprintf("otelcolconvert: converter %T returned empty component name", converter)) } - componentLabel := state.flowLabelForComponent(instance) + componentLabel := state.alloyLabelForComponent(instance) ids = append(ids, componentID{ Name: strings.Split(componentName, "."), diff --git a/internal/converter/internal/otelcolconvert/converter_attributesprocessor.go b/internal/converter/internal/otelcolconvert/converter_attributesprocessor.go index 7c6b9371df..be7e7ae9c4 100644 --- a/internal/converter/internal/otelcolconvert/converter_attributesprocessor.go +++ b/internal/converter/internal/otelcolconvert/converter_attributesprocessor.go @@ -28,7 +28,7 @@ func (attributesProcessorConverter) InputComponentName() string { func (attributesProcessorConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics - label := state.FlowComponentLabel() + label := state.AlloyComponentLabel() args := toAttributesProcessor(state, id, cfg.(*attributesprocessor.Config)) block := common.NewBlockWithOverride([]string{"otelcol", "processor", "attributes"}, label, args) diff --git a/internal/converter/internal/otelcolconvert/converter_basicauthextension.go b/internal/converter/internal/otelcolconvert/converter_basicauthextension.go index ce95cfb82c..a5bb6b462a 100644 --- a/internal/converter/internal/otelcolconvert/converter_basicauthextension.go +++ b/internal/converter/internal/otelcolconvert/converter_basicauthextension.go @@ -26,7 +26,7 @@ func (basicAuthConverterConverter) InputComponentName() string { return "otelcol func (basicAuthConverterConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics - label := state.FlowComponentLabel() + label := state.AlloyComponentLabel() args := toBasicAuthExtension(cfg.(*basicauthextension.Config)) block := common.NewBlockWithOverride([]string{"otelcol", "auth", "basic"}, label, args) diff --git a/internal/converter/internal/otelcolconvert/converter_batchprocessor.go b/internal/converter/internal/otelcolconvert/converter_batchprocessor.go index b6c24fb914..3538723b45 100644 --- a/internal/converter/internal/otelcolconvert/converter_batchprocessor.go +++ b/internal/converter/internal/otelcolconvert/converter_batchprocessor.go @@ -28,7 +28,7 @@ func (batchProcessorConverter) InputComponentName() string { func (batchProcessorConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics - label := state.FlowComponentLabel() + label := state.AlloyComponentLabel() args := toBatchProcessor(state, id, cfg.(*batchprocessor.Config)) block := common.NewBlockWithOverride([]string{"otelcol", "processor", "batch"}, label, args) diff --git a/internal/converter/internal/otelcolconvert/converter_bearertokenauthextension.go b/internal/converter/internal/otelcolconvert/converter_bearertokenauthextension.go index 1c68636636..5105d32b7e 100644 --- a/internal/converter/internal/otelcolconvert/converter_bearertokenauthextension.go +++ b/internal/converter/internal/otelcolconvert/converter_bearertokenauthextension.go @@ -29,7 +29,7 @@ func (bearerTokenAuthExtensionConverter) InputComponentName() string { return "o func 
(bearerTokenAuthExtensionConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics - label := state.FlowComponentLabel() + label := state.AlloyComponentLabel() bcfg := cfg.(*bearertokenauthextension.Config) var block *builder.Block @@ -66,7 +66,7 @@ func toBearerTokenAuthExtension(cfg *bearertokenauthextension.Config) *bearer.Ar } } func toBearerTokenAuthExtensionWithFilename(state *state, cfg *bearertokenauthextension.Config) (*bearer.Arguments, string) { - label := state.FlowComponentLabel() + label := state.AlloyComponentLabel() args := &file.Arguments{ Filename: cfg.Filename, Type: file.DefaultArguments.Type, // Using the default type (fsnotify) since that's what upstream also uses. diff --git a/internal/converter/internal/otelcolconvert/converter_filterprocessor.go b/internal/converter/internal/otelcolconvert/converter_filterprocessor.go index 06ea80f2c1..265106120c 100644 --- a/internal/converter/internal/otelcolconvert/converter_filterprocessor.go +++ b/internal/converter/internal/otelcolconvert/converter_filterprocessor.go @@ -28,7 +28,7 @@ func (filterProcessorConverter) InputComponentName() string { func (filterProcessorConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics - label := state.FlowComponentLabel() + label := state.AlloyComponentLabel() args := toFilterProcessor(state, id, cfg.(*filterprocessor.Config)) block := common.NewBlockWithOverride([]string{"otelcol", "processor", "filter"}, label, args) diff --git a/internal/converter/internal/otelcolconvert/converter_headerssetterextension.go b/internal/converter/internal/otelcolconvert/converter_headerssetterextension.go index 64e8bb4cce..b864698b29 100644 --- a/internal/converter/internal/otelcolconvert/converter_headerssetterextension.go +++ b/internal/converter/internal/otelcolconvert/converter_headerssetterextension.go @@ -26,7 +26,7 @@ func (headersSetterExtensionConverter) InputComponentName() string { return "ote func (headersSetterExtensionConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics - label := state.FlowComponentLabel() + label := state.AlloyComponentLabel() args := toHeadersSetterExtension(cfg.(*headerssetterextension.Config)) block := common.NewBlockWithOverride([]string{"otelcol", "auth", "headers"}, label, args) diff --git a/internal/converter/internal/otelcolconvert/converter_jaegerreceiver.go b/internal/converter/internal/otelcolconvert/converter_jaegerreceiver.go index 9ee1a2723c..d2919782ef 100644 --- a/internal/converter/internal/otelcolconvert/converter_jaegerreceiver.go +++ b/internal/converter/internal/otelcolconvert/converter_jaegerreceiver.go @@ -27,7 +27,7 @@ func (jaegerReceiverConverter) InputComponentName() string { return "" } func (jaegerReceiverConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics - label := state.FlowComponentLabel() + label := state.AlloyComponentLabel() args := toJaegerReceiver(state, id, cfg.(*jaegerreceiver.Config)) block := common.NewBlockWithOverride([]string{"otelcol", "receiver", "jaeger"}, label, args) diff --git a/internal/converter/internal/otelcolconvert/converter_jaegerremotesamplingextension.go b/internal/converter/internal/otelcolconvert/converter_jaegerremotesamplingextension.go index 51e66fa29e..6818f3b1d6 100644 --- 
a/internal/converter/internal/otelcolconvert/converter_jaegerremotesamplingextension.go +++ b/internal/converter/internal/otelcolconvert/converter_jaegerremotesamplingextension.go @@ -27,7 +27,7 @@ func (jaegerRemoteSamplingExtensionConverter) InputComponentName() string { func (jaegerRemoteSamplingExtensionConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics - label := state.FlowComponentLabel() + label := state.AlloyComponentLabel() args := toJaegerRemoteSamplingExtension(cfg.(*jaegerremotesampling.Config)) block := common.NewBlockWithOverride([]string{"otelcol", "extension", "jaeger_remote_sampling"}, label, args) diff --git a/internal/converter/internal/otelcolconvert/converter_k8sattributesprocessor.go b/internal/converter/internal/otelcolconvert/converter_k8sattributesprocessor.go index bb0db334e9..96bf5b3fbd 100644 --- a/internal/converter/internal/otelcolconvert/converter_k8sattributesprocessor.go +++ b/internal/converter/internal/otelcolconvert/converter_k8sattributesprocessor.go @@ -28,7 +28,7 @@ func (k8sAttributesProcessorConverter) InputComponentName() string { func (k8sAttributesProcessorConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics - label := state.FlowComponentLabel() + label := state.AlloyComponentLabel() args := toK8SAttributesProcessor(state, id, cfg.(*k8sattributesprocessor.Config)) block := common.NewBlockWithOverride([]string{"otelcol", "processor", "k8sattributes"}, label, args) diff --git a/internal/converter/internal/otelcolconvert/converter_kafkareceiver.go b/internal/converter/internal/otelcolconvert/converter_kafkareceiver.go index dee9d99aef..33aaae26ca 100644 --- a/internal/converter/internal/otelcolconvert/converter_kafkareceiver.go +++ b/internal/converter/internal/otelcolconvert/converter_kafkareceiver.go @@ -29,7 +29,7 @@ func (kafkaReceiverConverter) InputComponentName() string { return "" } func (kafkaReceiverConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics - label := state.FlowComponentLabel() + label := state.AlloyComponentLabel() args := toKafkaReceiver(state, id, cfg.(*kafkareceiver.Config)) block := common.NewBlockWithOverride([]string{"otelcol", "receiver", "kafka"}, label, args) @@ -183,8 +183,8 @@ func toKafkaMessageMarking(cfg kafkareceiver.MessageMarking) kafka.MessageMarkin func toKafkaHeaderExtraction(cfg kafkareceiver.HeaderExtraction) kafka.HeaderExtraction { // If cfg.Headers is nil, we set it to an empty slice to align with - // the default of the Flow component; if this isn't done than default headers - // will be explicitly set as `[]` in the generated Flow configuration file, which + // the default of the Alloy component; if this isn't done then default headers + // will be explicitly set as `[]` in the generated Alloy configuration file, which // may confuse users.
if cfg.Headers == nil { cfg.Headers = []string{} diff --git a/internal/converter/internal/otelcolconvert/converter_loadbalancingexporter.go b/internal/converter/internal/otelcolconvert/converter_loadbalancingexporter.go index 961c170832..0119d35816 100644 --- a/internal/converter/internal/otelcolconvert/converter_loadbalancingexporter.go +++ b/internal/converter/internal/otelcolconvert/converter_loadbalancingexporter.go @@ -31,7 +31,7 @@ func (loadbalancingExporterConverter) InputComponentName() string { func (loadbalancingExporterConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics - label := state.FlowComponentLabel() + label := state.AlloyComponentLabel() overrideHook := func(val interface{}) interface{} { switch val.(type) { case auth.Handler: diff --git a/internal/converter/internal/otelcolconvert/converter_loggingexporter.go b/internal/converter/internal/otelcolconvert/converter_loggingexporter.go index 226fc9d28f..505b59b422 100644 --- a/internal/converter/internal/otelcolconvert/converter_loggingexporter.go +++ b/internal/converter/internal/otelcolconvert/converter_loggingexporter.go @@ -28,7 +28,7 @@ func (loggingExporterConverter) InputComponentName() string { func (loggingExporterConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics - label := state.FlowComponentLabel() + label := state.AlloyComponentLabel() args := toOtelcolExporterLogging(cfg.(*loggingexporter.Config)) block := common.NewBlockWithOverrideFn([]string{"otelcol", "exporter", "logging"}, label, args, nil) diff --git a/internal/converter/internal/otelcolconvert/converter_memorylimiterprocessor.go b/internal/converter/internal/otelcolconvert/converter_memorylimiterprocessor.go index f9e39a7b08..fff1ea5b5b 100644 --- a/internal/converter/internal/otelcolconvert/converter_memorylimiterprocessor.go +++ b/internal/converter/internal/otelcolconvert/converter_memorylimiterprocessor.go @@ -28,7 +28,7 @@ func (memoryLimiterProcessorConverter) InputComponentName() string { func (memoryLimiterProcessorConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics - label := state.FlowComponentLabel() + label := state.AlloyComponentLabel() args := toMemoryLimiterProcessor(state, id, cfg.(*memorylimiterprocessor.Config)) block := common.NewBlockWithOverride([]string{"otelcol", "processor", "memory_limiter"}, label, args) diff --git a/internal/converter/internal/otelcolconvert/converter_oauth2clientauthextension.go b/internal/converter/internal/otelcolconvert/converter_oauth2clientauthextension.go index 95f82414be..cf2b3e9d02 100644 --- a/internal/converter/internal/otelcolconvert/converter_oauth2clientauthextension.go +++ b/internal/converter/internal/otelcolconvert/converter_oauth2clientauthextension.go @@ -26,7 +26,7 @@ func (oauth2ClientAuthExtensionConverter) InputComponentName() string { return " func (oauth2ClientAuthExtensionConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics - label := state.FlowComponentLabel() + label := state.AlloyComponentLabel() args := toOAuth2ClientAuthExtension(cfg.(*oauth2clientauthextension.Config)) block := common.NewBlockWithOverride([]string{"otelcol", "auth", "oauth2"}, label, args) diff --git a/internal/converter/internal/otelcolconvert/converter_opencensusreceiver.go 
b/internal/converter/internal/otelcolconvert/converter_opencensusreceiver.go index 4745582e30..53ee5de203 100644 --- a/internal/converter/internal/otelcolconvert/converter_opencensusreceiver.go +++ b/internal/converter/internal/otelcolconvert/converter_opencensusreceiver.go @@ -26,7 +26,7 @@ func (opencensusReceiverConverter) InputComponentName() string { return "" } func (opencensusReceiverConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics - label := state.FlowComponentLabel() + label := state.AlloyComponentLabel() args := toOpencensusReceiver(state, id, cfg.(*opencensusreceiver.Config)) block := common.NewBlockWithOverride([]string{"otelcol", "receiver", "opencensus"}, label, args) diff --git a/internal/converter/internal/otelcolconvert/converter_otlpexporter.go b/internal/converter/internal/otelcolconvert/converter_otlpexporter.go index 5584ed8686..c370f771c7 100644 --- a/internal/converter/internal/otelcolconvert/converter_otlpexporter.go +++ b/internal/converter/internal/otelcolconvert/converter_otlpexporter.go @@ -33,7 +33,7 @@ func (otlpExporterConverter) InputComponentName() string { return "otelcol.expor func (otlpExporterConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics - label := state.FlowComponentLabel() + label := state.AlloyComponentLabel() overrideHook := func(val interface{}) interface{} { switch val.(type) { case auth.Handler: diff --git a/internal/converter/internal/otelcolconvert/converter_otlphttpexporter.go b/internal/converter/internal/otelcolconvert/converter_otlphttpexporter.go index abc363514e..40f7f07524 100644 --- a/internal/converter/internal/otelcolconvert/converter_otlphttpexporter.go +++ b/internal/converter/internal/otelcolconvert/converter_otlphttpexporter.go @@ -33,7 +33,7 @@ func (otlpHTTPExporterConverter) InputComponentName() string { func (otlpHTTPExporterConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics - label := state.FlowComponentLabel() + label := state.AlloyComponentLabel() overrideHook := func(val interface{}) interface{} { switch val.(type) { case auth.Handler: diff --git a/internal/converter/internal/otelcolconvert/converter_otlpreceiver.go b/internal/converter/internal/otelcolconvert/converter_otlpreceiver.go index 7e23bab1a9..8f27914ff7 100644 --- a/internal/converter/internal/otelcolconvert/converter_otlpreceiver.go +++ b/internal/converter/internal/otelcolconvert/converter_otlpreceiver.go @@ -29,7 +29,7 @@ func (otlpReceiverConverter) InputComponentName() string { return "" } func (otlpReceiverConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics - label := state.FlowComponentLabel() + label := state.AlloyComponentLabel() args := toOtelcolReceiverOTLP(state, id, cfg.(*otlpreceiver.Config)) block := common.NewBlockWithOverride([]string{"otelcol", "receiver", "otlp"}, label, args) diff --git a/internal/converter/internal/otelcolconvert/converter_probabilisticsamplerprocessor.go b/internal/converter/internal/otelcolconvert/converter_probabilisticsamplerprocessor.go index f7ef7d482f..021ff5f8c9 100644 --- a/internal/converter/internal/otelcolconvert/converter_probabilisticsamplerprocessor.go +++ b/internal/converter/internal/otelcolconvert/converter_probabilisticsamplerprocessor.go @@ -28,7 +28,7 @@ func 
(probabilisticSamplerProcessorConverter) InputComponentName() string { func (probabilisticSamplerProcessorConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics - label := state.FlowComponentLabel() + label := state.AlloyComponentLabel() args := toProbabilisticSamplerProcessor(state, id, cfg.(*probabilisticsamplerprocessor.Config)) block := common.NewBlockWithOverride([]string{"otelcol", "processor", "probabilistic_sampler"}, label, args) diff --git a/internal/converter/internal/otelcolconvert/converter_spanmetricsconnector.go b/internal/converter/internal/otelcolconvert/converter_spanmetricsconnector.go index d72ce66d1b..14a7349778 100644 --- a/internal/converter/internal/otelcolconvert/converter_spanmetricsconnector.go +++ b/internal/converter/internal/otelcolconvert/converter_spanmetricsconnector.go @@ -29,7 +29,7 @@ func (spanmetricsConnectorConverter) InputComponentName() string { func (spanmetricsConnectorConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics - label := state.FlowComponentLabel() + label := state.AlloyComponentLabel() args := toSpanmetricsConnector(state, id, cfg.(*spanmetricsconnector.Config)) block := common.NewBlockWithOverride([]string{"otelcol", "connector", "spanmetrics"}, label, args) diff --git a/internal/converter/internal/otelcolconvert/converter_spanprocessor.go b/internal/converter/internal/otelcolconvert/converter_spanprocessor.go index fe6f887699..a3b2136d8f 100644 --- a/internal/converter/internal/otelcolconvert/converter_spanprocessor.go +++ b/internal/converter/internal/otelcolconvert/converter_spanprocessor.go @@ -25,7 +25,7 @@ func (spanProcessorConverter) InputComponentName() string { return "otelcol.proc func (spanProcessorConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics - label := state.FlowComponentLabel() + label := state.AlloyComponentLabel() args := toSpanProcessor(state, id, cfg.(*spanprocessor.Config)) block := common.NewBlockWithOverride([]string{"otelcol", "processor", "span"}, label, args) diff --git a/internal/converter/internal/otelcolconvert/converter_tailsamplingprocessor.go b/internal/converter/internal/otelcolconvert/converter_tailsamplingprocessor.go index b292d00d66..70b239abe3 100644 --- a/internal/converter/internal/otelcolconvert/converter_tailsamplingprocessor.go +++ b/internal/converter/internal/otelcolconvert/converter_tailsamplingprocessor.go @@ -29,7 +29,7 @@ func (tailSamplingProcessorConverter) InputComponentName() string { func (tailSamplingProcessorConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics - label := state.FlowComponentLabel() + label := state.AlloyComponentLabel() args := toTailSamplingProcessor(state, id, cfg.(*tailsamplingprocessor.Config)) block := common.NewBlockWithOverride([]string{"otelcol", "processor", "tail_sampling"}, label, args) diff --git a/internal/converter/internal/otelcolconvert/converter_transformprocessor.go b/internal/converter/internal/otelcolconvert/converter_transformprocessor.go index dec37c3c1d..0f42fd9a11 100644 --- a/internal/converter/internal/otelcolconvert/converter_transformprocessor.go +++ b/internal/converter/internal/otelcolconvert/converter_transformprocessor.go @@ -28,7 +28,7 @@ func (transformProcessorConverter) InputComponentName() string { func 
(transformProcessorConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics - label := state.FlowComponentLabel() + label := state.AlloyComponentLabel() args := toTransformProcessor(state, id, cfg.(*transformprocessor.Config)) block := common.NewBlockWithOverride([]string{"otelcol", "processor", "transform"}, label, args) diff --git a/internal/converter/internal/otelcolconvert/converter_zipkinreceiver.go b/internal/converter/internal/otelcolconvert/converter_zipkinreceiver.go index 28ca8126c9..d1c67dda88 100644 --- a/internal/converter/internal/otelcolconvert/converter_zipkinreceiver.go +++ b/internal/converter/internal/otelcolconvert/converter_zipkinreceiver.go @@ -24,7 +24,7 @@ func (zipkinReceiverConverter) InputComponentName() string { return "" } func (zipkinReceiverConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics - label := state.FlowComponentLabel() + label := state.AlloyComponentLabel() args := toZipkinReceiver(state, id, cfg.(*zipkinreceiver.Config)) block := common.NewBlockWithOverride([]string{"otelcol", "receiver", "zipkin"}, label, args) diff --git a/internal/converter/internal/otelcolconvert/otelcolconvert.go b/internal/converter/internal/otelcolconvert/otelcolconvert.go index d6b04c0e9d..32a6765114 100644 --- a/internal/converter/internal/otelcolconvert/otelcolconvert.go +++ b/internal/converter/internal/otelcolconvert/otelcolconvert.go @@ -24,7 +24,7 @@ import ( // This package is split into a set of [componentConverter] implementations // which convert a single OpenTelemetry Collector component into one or more -// Flow components. +// Alloy components. // // To support converting a new OpenTelmetry Component, follow these steps and // replace COMPONENT with the name of the component being converted: @@ -70,7 +70,7 @@ func Convert(in []byte, extraArgs []string) ([]byte, diag.Diagnostics) { var buf bytes.Buffer if _, err := f.WriteTo(&buf); err != nil { - diags.Add(diag.SeverityLevelCritical, fmt.Sprintf("failed to render Flow config: %s", err.Error())) + diags.Add(diag.SeverityLevelCritical, fmt.Sprintf("failed to render Alloy config: %s", err.Error())) return nil, diags } @@ -142,7 +142,7 @@ func getFactories() otelcol.Factories { } // AppendConfig converts the provided OpenTelemetry config into an equivalent -// Flow config and appends the result to the provided file. +// Alloy config and appends the result to the provided file. func AppendConfig(file *builder.File, cfg *otelcol.Config, labelPrefix string) diag.Diagnostics { var diags diag.Diagnostics @@ -171,7 +171,7 @@ func AppendConfig(file *builder.File, cfg *otelcol.Config, labelPrefix string) d // listen on the same port. // // This isn't a problem in pure OpenTelemetry Collector because it internally - // deduplicates receiver instances, but since Flow don't have this logic we + // deduplicates receiver instances, but since Alloy doesn't have this logic we // need to reject these kinds of configs for now.
if duplicateDiags := validateNoDuplicateReceivers(groups, connectorIDs); len(duplicateDiags) > 0 { diags.AddAll(duplicateDiags) @@ -179,7 +179,7 @@ func AppendConfig(file *builder.File, cfg *otelcol.Config, labelPrefix string) d } // We build the list of extensions 'activated' (defined in the service) as - // Flow components and keep a mapping of their OTel IDs to the blocks we've + // Alloy components and keep a mapping of their OTel IDs to the blocks we've // built. // Since there's no concept of multiple extensions per group or telemetry // signal, we can build them before iterating over the groups. @@ -192,7 +192,7 @@ func AppendConfig(file *builder.File, cfg *otelcol.Config, labelPrefix string) d cfg: cfg, file: file, // We pass an empty pipelineGroup to make calls to - // FlowComponentLabel valid for both the converter authors and the + // AlloyComponentLabel valid for both the converter authors and the // extension table mapping. group: &pipelineGroup{}, @@ -213,7 +213,7 @@ func AppendConfig(file *builder.File, cfg *otelcol.Config, labelPrefix string) d extensionTable[ext] = componentID{ Name: strings.Split(conv.InputComponentName(), "."), - Label: state.FlowComponentLabel(), + Label: state.AlloyComponentLabel(), } } @@ -265,7 +265,7 @@ func AppendConfig(file *builder.File, cfg *otelcol.Config, labelPrefix string) d } // validateNoDuplicateReceivers validates that a given receiver does not appear -// in two different pipeline groups. This is required because Flow does not +// in two different pipeline groups. This is required because Alloy does not // allow the same receiver to be instantiated more than once, while this is // fine in OpenTelemetry due to internal deduplication rules. func validateNoDuplicateReceivers(groups []pipelineGroup, connectorIDs []component.ID) diag.Diagnostics { diff --git a/internal/converter/internal/otelcolconvert/pipeline_group.go b/internal/converter/internal/otelcolconvert/pipeline_group.go index 3c6f278aad..6c5cc31932 100644 --- a/internal/converter/internal/otelcolconvert/pipeline_group.go +++ b/internal/converter/internal/otelcolconvert/pipeline_group.go @@ -44,7 +44,7 @@ type pipelineGroup struct { // traces/2]. The key used for grouping is the name of the pipeline, so that // pipelines with matching names belong to the same group. // -// This allows us to emit a Flow-native pipeline, where one component is +// This allows us to emit an Alloy-native pipeline, where one component is // responsible for multiple telemetry types, as opposed as to creating the // otlp/2 receiver two separate times (once for metrics and once for traces). 
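To make the naming rule documented in converter.go above concrete, here is a small, self-contained sketch, not part of the patch, of how a pipeline group name and an OTel component name combine into an Alloy label: empty parts fall back to "default" and the parts are joined with an underscore before sanitization. The sanitize helper only stands in for common.SanitizeIdentifierPanics, and the concatenation order is an assumption.

package main

import (
	"fmt"
	"strings"
)

// alloyLabel mirrors the documented scheme: if both parts are empty the
// label is "default"; otherwise empty parts become "default" and the two
// are concatenated with an underscore.
func alloyLabel(groupName, componentName string) string {
	if groupName == "" && componentName == "" {
		return "default"
	}
	if groupName == "" {
		groupName = "default"
	}
	if componentName == "" {
		componentName = "default"
	}
	return sanitize(groupName + "_" + componentName)
}

// sanitize is a simplified stand-in for common.SanitizeIdentifierPanics: it
// only rewrites a few characters that are invalid in an Alloy identifier.
func sanitize(s string) string {
	return strings.Map(func(r rune) rune {
		switch r {
		case '/', '-', '.':
			return '_'
		}
		return r
	}, s)
}

func main() {
	fmt.Println(alloyLabel("", ""))               // default
	fmt.Println(alloyLabel("agent", ""))          // agent_default
	fmt.Println(alloyLabel("agent", "in-memory")) // agent_in_memory
}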
// diff --git a/internal/converter/internal/prometheusconvert/component/relabel.go b/internal/converter/internal/prometheusconvert/component/relabel.go index 7e8264473d..f5eaa71ed9 100644 --- a/internal/converter/internal/prometheusconvert/component/relabel.go +++ b/internal/converter/internal/prometheusconvert/component/relabel.go @@ -3,7 +3,7 @@ package component import ( "fmt" - flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + alloy_relabel "github.com/grafana/alloy/internal/component/common/relabel" "github.com/grafana/alloy/internal/component/discovery" disc_relabel "github.com/grafana/alloy/internal/component/discovery/relabel" "github.com/grafana/alloy/internal/component/prometheus/relabel" @@ -35,7 +35,7 @@ func toRelabelArguments(relabelConfigs []*prom_relabel.Config, forwardTo []stora return &relabel.Arguments{ ForwardTo: forwardTo, - MetricRelabelConfigs: ToFlowRelabelConfigs(relabelConfigs), + MetricRelabelConfigs: ToAlloyRelabelConfigs(relabelConfigs), CacheSize: 100_000, } } @@ -58,16 +58,16 @@ func AppendDiscoveryRelabel(pb *build.PrometheusBlocks, relabelConfigs []*prom_r func toDiscoveryRelabelArguments(relabelConfigs []*prom_relabel.Config, targets []discovery.Target) *disc_relabel.Arguments { return &disc_relabel.Arguments{ Targets: targets, - RelabelConfigs: ToFlowRelabelConfigs(relabelConfigs), + RelabelConfigs: ToAlloyRelabelConfigs(relabelConfigs), } } -func ToFlowRelabelConfigs(relabelConfigs []*prom_relabel.Config) []*flow_relabel.Config { +func ToAlloyRelabelConfigs(relabelConfigs []*prom_relabel.Config) []*alloy_relabel.Config { if len(relabelConfigs) == 0 { return nil } - var metricRelabelConfigs []*flow_relabel.Config + var metricRelabelConfigs []*alloy_relabel.Config for _, relabelConfig := range relabelConfigs { var sourceLabels []string if len(relabelConfig.SourceLabels) > 0 { @@ -77,14 +77,14 @@ func ToFlowRelabelConfigs(relabelConfigs []*prom_relabel.Config) []*flow_relabel } } - metricRelabelConfigs = append(metricRelabelConfigs, &flow_relabel.Config{ + metricRelabelConfigs = append(metricRelabelConfigs, &alloy_relabel.Config{ SourceLabels: sourceLabels, Separator: relabelConfig.Separator, - Regex: flow_relabel.Regexp(relabelConfig.Regex), + Regex: alloy_relabel.Regexp(relabelConfig.Regex), Modulus: relabelConfig.Modulus, TargetLabel: relabelConfig.TargetLabel, Replacement: relabelConfig.Replacement, - Action: flow_relabel.Action(relabelConfig.Action), + Action: alloy_relabel.Action(relabelConfig.Action), }) } diff --git a/internal/converter/internal/prometheusconvert/component/remote_write.go b/internal/converter/internal/prometheusconvert/component/remote_write.go index 4b1156babd..384cec27fd 100644 --- a/internal/converter/internal/prometheusconvert/component/remote_write.go +++ b/internal/converter/internal/prometheusconvert/component/remote_write.go @@ -75,7 +75,7 @@ func getEndpointOptions(remoteWriteConfigs []*prom_config.RemoteWriteConfig) []* HTTPClientConfig: common.ToHttpClientConfig(&remoteWriteConfig.HTTPClientConfig), QueueOptions: toQueueOptions(&remoteWriteConfig.QueueConfig), MetadataOptions: toMetadataOptions(&remoteWriteConfig.MetadataConfig), - WriteRelabelConfigs: ToFlowRelabelConfigs(remoteWriteConfig.WriteRelabelConfigs), + WriteRelabelConfigs: ToAlloyRelabelConfigs(remoteWriteConfig.WriteRelabelConfigs), SigV4: toSigV4(remoteWriteConfig.SigV4Config), AzureAD: toAzureAD(remoteWriteConfig.AzureADConfig), } diff --git a/internal/converter/internal/prometheusconvert/prometheusconvert.go 
b/internal/converter/internal/prometheusconvert/prometheusconvert.go index 7c6d00897f..e97b9f6dd1 100644 --- a/internal/converter/internal/prometheusconvert/prometheusconvert.go +++ b/internal/converter/internal/prometheusconvert/prometheusconvert.go @@ -43,7 +43,7 @@ func Convert(in []byte, extraArgs []string) ([]byte, diag.Diagnostics) { var buf bytes.Buffer if _, err := f.WriteTo(&buf); err != nil { - diags.Add(diag.SeverityLevelCritical, fmt.Sprintf("failed to render Flow config: %s", err.Error())) + diags.Add(diag.SeverityLevelCritical, fmt.Sprintf("failed to render Alloy config: %s", err.Error())) return nil, diags } @@ -57,16 +57,16 @@ func Convert(in []byte, extraArgs []string) ([]byte, diag.Diagnostics) { } // AppendAll analyzes the entire prometheus config in memory and transforms it -// into Flow Arguments. It then appends each argument to the file builder. -// Exports from other components are correctly referenced to build the Flow -// pipeline. +// into Alloy component Arguments. It then appends each argument to the file +// builder. Exports from other components are correctly referenced to build the +// Alloy pipeline. func AppendAll(f *builder.File, promConfig *prom_config.Config) diag.Diagnostics { return AppendAllNested(f, promConfig, nil, []discovery.Target{}, nil) } // AppendAllNested analyzes the entire prometheus config in memory and transforms it -// into Flow Arguments. It then appends each argument to the file builder. -// Exports from other components are correctly referenced to build the Flow +// into Alloy component Arguments. It then appends each argument to the file builder. +// Exports from other components are correctly referenced to build the Alloy // pipeline. Additional options can be provided overriding the job name, extra // scrape targets, and predefined remote write exports. 
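As a usage illustration, not part of the patch, the snippet below feeds a Prometheus configuration through the package-level Convert whose signature appears in the hunk above. Because the package lives under internal/, this only compiles from within the repository; the input path and the way diagnostics are printed are assumptions, and diag.Diagnostics is treated as a printable slice.

package main

import (
	"fmt"
	"os"

	"github.com/grafana/alloy/internal/converter/internal/prometheusconvert"
)

func main() {
	// Read an existing Prometheus configuration; the path is hypothetical.
	in, err := os.ReadFile("prometheus.yml")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// Convert returns the rendered Alloy configuration together with any
	// diagnostics (warnings or errors) produced during conversion.
	out, diags := prometheusconvert.Convert(in, nil)
	for _, d := range diags {
		fmt.Fprintf(os.Stderr, "%v\n", d) // print each diagnostic as-is
	}
	if out != nil {
		fmt.Print(string(out))
	}
}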
func AppendAllNested(f *builder.File, promConfig *prom_config.Config, jobNameToCompLabelsFunc func(string) string, extraScrapeTargets []discovery.Target, remoteWriteExports *remotewrite.Exports) diag.Diagnostics { diff --git a/internal/converter/internal/promtailconvert/internal/build/docker_sd.go b/internal/converter/internal/promtailconvert/internal/build/docker_sd.go index f50d92634c..60b4976842 100644 --- a/internal/converter/internal/promtailconvert/internal/build/docker_sd.go +++ b/internal/converter/internal/promtailconvert/internal/build/docker_sd.go @@ -4,7 +4,7 @@ import ( "time" "github.com/grafana/alloy/internal/component/common/loki" - flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + alloy_relabel "github.com/grafana/alloy/internal/component/common/relabel" "github.com/grafana/alloy/internal/component/discovery" "github.com/grafana/alloy/internal/component/discovery/docker" loki_docker "github.com/grafana/alloy/internal/component/loki/source/docker" @@ -35,7 +35,7 @@ func (s *ScrapeConfigBuilder) AppendDockerPipeline() { switch val.(type) { case []discovery.Target: // override targets expression to our string return common.CustomTokenizer{Expr: targets} - case flow_relabel.Rules: // use the relabel rules defined for this pipeline + case alloy_relabel.Rules: // use the relabel rules defined for this pipeline return common.CustomTokenizer{Expr: s.getOrNewDiscoveryRelabelRules()} } return val @@ -57,7 +57,7 @@ func toLokiSourceDocker(sd *moby.DockerSDConfig, forwardTo []loki.LogsReceiver) Targets: nil, ForwardTo: forwardTo, Labels: nil, - RelabelRules: flow_relabel.Rules{}, + RelabelRules: alloy_relabel.Rules{}, HTTPClientConfig: common.ToHttpClientConfig(&sd.HTTPClientConfig), RefreshInterval: time.Duration(sd.RefreshInterval), } @@ -73,23 +73,23 @@ func toDiscoveryDocker(sdConfig *moby.DockerSDConfig) *docker.Arguments { Port: sdConfig.Port, HostNetworkingHost: sdConfig.HostNetworkingHost, RefreshInterval: time.Duration(sdConfig.RefreshInterval), - Filters: toFlowDockerSDFilters(sdConfig.Filters), + Filters: toAlloyDockerSDFilters(sdConfig.Filters), HTTPClientConfig: *common.ToHttpClientConfig(&sdConfig.HTTPClientConfig), } } -func toFlowDockerSDFilters(filters []moby.Filter) []docker.Filter { +func toAlloyDockerSDFilters(filters []moby.Filter) []docker.Filter { if len(filters) == 0 { return nil } - flowFilters := make([]docker.Filter, len(filters)) + alloyFilters := make([]docker.Filter, len(filters)) for i, filter := range filters { - flowFilters[i] = docker.Filter{ + alloyFilters[i] = docker.Filter{ Name: filter.Name, Values: filter.Values, } } - return flowFilters + return alloyFilters } diff --git a/internal/converter/internal/promtailconvert/internal/build/gcplog.go b/internal/converter/internal/promtailconvert/internal/build/gcplog.go index 71af0e89de..4f2dd2cd18 100644 --- a/internal/converter/internal/promtailconvert/internal/build/gcplog.go +++ b/internal/converter/internal/promtailconvert/internal/build/gcplog.go @@ -30,9 +30,9 @@ func (s *ScrapeConfigBuilder) AppendGCPLog() { } case "push": s.diags.AddAll(common.ValidateWeaveWorksServerCfg(cfg.Server)) - flowServer := common.WeaveWorksServerToFlowServer(cfg.Server) + alloyServer := common.WeaveworksServerToAlloyServer(cfg.Server) pushConfig = &gcptypes.PushConfig{ - Server: flowServer, + Server: alloyServer, PushTimeout: cfg.PushTimeout, Labels: convertPromLabels(cfg.Labels), UseIncomingTimestamp: cfg.UseIncomingTimestamp, diff --git 
a/internal/converter/internal/promtailconvert/internal/build/herokudrain.go b/internal/converter/internal/promtailconvert/internal/build/herokudrain.go index efe009e32b..bb9ea47edd 100644 --- a/internal/converter/internal/promtailconvert/internal/build/herokudrain.go +++ b/internal/converter/internal/promtailconvert/internal/build/herokudrain.go @@ -12,7 +12,7 @@ func (s *ScrapeConfigBuilder) AppendHerokuDrainConfig() { } hCfg := s.cfg.HerokuDrainConfig args := heroku.Arguments{ - Server: common.WeaveWorksServerToFlowServer(hCfg.Server), + Server: common.WeaveworksServerToAlloyServer(hCfg.Server), Labels: convertPromLabels(hCfg.Labels), UseIncomingTimestamp: hCfg.UseIncomingTimestamp, ForwardTo: s.getOrNewProcessStageReceivers(), diff --git a/internal/converter/internal/promtailconvert/internal/build/journal.go b/internal/converter/internal/promtailconvert/internal/build/journal.go index 4d1b617527..6e289c4c83 100644 --- a/internal/converter/internal/promtailconvert/internal/build/journal.go +++ b/internal/converter/internal/promtailconvert/internal/build/journal.go @@ -4,7 +4,7 @@ import ( "fmt" "time" - flowrelabel "github.com/grafana/alloy/internal/component/common/relabel" + alloyrelabel "github.com/grafana/alloy/internal/component/common/relabel" "github.com/grafana/alloy/internal/component/loki/source/journal" "github.com/grafana/alloy/internal/converter/diag" "github.com/grafana/alloy/internal/converter/internal/common" @@ -34,11 +34,11 @@ func (s *ScrapeConfigBuilder) AppendJournalConfig() { Path: jc.Path, Receivers: s.getOrNewProcessStageReceivers(), Labels: convertPromLabels(jc.Labels), - RelabelRules: flowrelabel.Rules{}, + RelabelRules: alloyrelabel.Rules{}, } relabelRulesExpr := s.getOrNewDiscoveryRelabelRules() hook := func(val interface{}) interface{} { - if _, ok := val.(flowrelabel.Rules); ok { + if _, ok := val.(alloyrelabel.Rules); ok { return common.CustomTokenizer{Expr: relabelRulesExpr} } return val diff --git a/internal/converter/internal/promtailconvert/internal/build/push_api.go b/internal/converter/internal/promtailconvert/internal/build/push_api.go index 12f20df51e..8f2f1c8575 100644 --- a/internal/converter/internal/promtailconvert/internal/build/push_api.go +++ b/internal/converter/internal/promtailconvert/internal/build/push_api.go @@ -38,6 +38,6 @@ func toLokiApiArguments(config *scrapeconfig.PushTargetConfig, forwardTo []loki. RelabelRules: make(relabel.Rules, 0), Labels: convertPromLabels(config.Labels), UseIncomingTimestamp: config.KeepTimestamp, - Server: common.WeaveWorksServerToFlowServer(config.Server), + Server: common.WeaveworksServerToAlloyServer(config.Server), } } diff --git a/internal/converter/internal/promtailconvert/internal/build/scrape_builder.go b/internal/converter/internal/promtailconvert/internal/build/scrape_builder.go index 2efb4a1271..54409246db 100644 --- a/internal/converter/internal/promtailconvert/internal/build/scrape_builder.go +++ b/internal/converter/internal/promtailconvert/internal/build/scrape_builder.go @@ -102,7 +102,7 @@ func (s *ScrapeConfigBuilder) getOrNewLokiRelabel() string { if s.lokiRelabelReceiverExpr == "" { args := lokirelabel.Arguments{ ForwardTo: s.getOrNewProcessStageReceivers(), - RelabelConfigs: component.ToFlowRelabelConfigs(s.cfg.RelabelConfigs), + RelabelConfigs: component.ToAlloyRelabelConfigs(s.cfg.RelabelConfigs), // max_cache_size doesnt exist in static, and we need to manually set it to default. 
// Since the default is 10_000 if we didnt set the value, it would compare the default 10k to 0 and emit 0. // We actually dont want to emit anything since this setting doesnt exist in static, setting to 10k matches the default @@ -125,15 +125,15 @@ func (s *ScrapeConfigBuilder) getOrNewProcessStageReceivers() []loki.LogsReceive return s.processStageReceivers } - flowStages := make([]stages.StageConfig, len(s.cfg.PipelineStages)) + alloyStages := make([]stages.StageConfig, len(s.cfg.PipelineStages)) for i, ps := range s.cfg.PipelineStages { if fs, ok := convertStage(ps, s.diags); ok { - flowStages[i] = fs + alloyStages[i] = fs } } args := process.Arguments{ ForwardTo: s.globalCtx.WriteReceivers, - Stages: flowStages, + Stages: alloyStages, } compLabel := common.LabelForParts(s.globalCtx.LabelPrefix, s.cfg.JobName) s.f.Body().AppendBlock(common.NewBlockWithOverride([]string{"loki", "process"}, compLabel, args)) @@ -153,7 +153,7 @@ func (s *ScrapeConfigBuilder) appendDiscoveryRelabel() { return } - relabelConfigs := component.ToFlowRelabelConfigs(s.cfg.RelabelConfigs) + relabelConfigs := component.ToAlloyRelabelConfigs(s.cfg.RelabelConfigs) args := relabel.Arguments{ RelabelConfigs: relabelConfigs, } diff --git a/internal/converter/internal/promtailconvert/internal/build/stages.go b/internal/converter/internal/promtailconvert/internal/build/stages.go index 294c786fe0..7afd405f0b 100644 --- a/internal/converter/internal/promtailconvert/internal/build/stages.go +++ b/internal/converter/internal/promtailconvert/internal/build/stages.go @@ -459,7 +459,7 @@ func convertMetrics(cfg interface{}, diags *diag.Diagnostics) (stages.StageConfi for _, name := range sortedNames { pMetric := (*pMetrics)[name] - fMetric, ok := toFlowMetricProcessStage(name, pMetric, diags) + fMetric, ok := toAlloyMetricsProcessStage(name, pMetric, diags) if !ok { return stages.StageConfig{}, false } @@ -470,7 +470,7 @@ func convertMetrics(cfg interface{}, diags *diag.Diagnostics) (stages.StageConfi }}, true } -func toFlowMetricProcessStage(name string, pMetric promtailstages.MetricConfig, diags *diag.Diagnostics) (stages.MetricConfig, bool) { +func toAlloyMetricsProcessStage(name string, pMetric promtailstages.MetricConfig, diags *diag.Diagnostics) (stages.MetricConfig, bool) { var fMetric stages.MetricConfig var maxIdle time.Duration diff --git a/internal/converter/internal/promtailconvert/promtailconvert.go b/internal/converter/internal/promtailconvert/promtailconvert.go index b99db61bdc..72298e3b63 100644 --- a/internal/converter/internal/promtailconvert/promtailconvert.go +++ b/internal/converter/internal/promtailconvert/promtailconvert.go @@ -75,7 +75,7 @@ func Convert(in []byte, extraArgs []string) ([]byte, diag.Diagnostics) { var buf bytes.Buffer if _, err := f.WriteTo(&buf); err != nil { - diags.Add(diag.SeverityLevelCritical, fmt.Sprintf("failed to render Flow config: %s", err.Error())) + diags.Add(diag.SeverityLevelCritical, fmt.Sprintf("failed to render Alloy config: %s", err.Error())) return nil, diags } @@ -89,7 +89,7 @@ func Convert(in []byte, extraArgs []string) ([]byte, diag.Diagnostics) { } // AppendAll analyzes the entire promtail config in memory and transforms it -// into Flow components. It then appends each argument to the file builder. +// into Alloy components. It then appends each argument to the file builder. 
func AppendAll(f *builder.File, cfg *promtailcfg.Config, labelPrefix string, diags diag.Diagnostics) diag.Diagnostics { validateTopLevelConfig(cfg, &diags) diff --git a/internal/converter/internal/promtailconvert/testdata/unsupported.diags b/internal/converter/internal/promtailconvert/testdata/unsupported.diags index f5acdc0b20..eb0a979d3d 100644 --- a/internal/converter/internal/promtailconvert/testdata/unsupported.diags +++ b/internal/converter/internal/promtailconvert/testdata/unsupported.diags @@ -1,9 +1,9 @@ -(Error) Promtail's WAL is currently not supported in Flow Mode -(Error) limits_config is not yet supported in Flow Mode -(Warning) If you have a tracing set up for Promtail, it cannot be migrated to Flow Mode automatically. Refer to the documentation on how to configure tracing in Flow Mode. -(Error) reading targets from stdin is not supported in Flow Mode configuration file +(Error) Promtail's WAL is currently not supported in Alloy +(Error) limits_config is not yet supported in Alloy +(Warning) If you have a tracing set up for Promtail, it cannot be migrated to Alloy automatically. Refer to the documentation on how to configure tracing in Alloy. +(Error) reading targets from stdin is not supported in Alloy configuration file (Warning) server.profiling_enabled is not supported - use Agent's main HTTP server's profiling endpoints instead -(Warning) The Agent's Flow Mode metrics are different from the metrics emitted by Promtail. If you rely on Promtail's metrics, you must change your configuration, for example, your alerts and dashboards. -(Warning) The converter does not support converting the provided server.log_level config: The equivalent feature in Flow mode is to use the logging config block to set the level argument. +(Warning) Alloy's metrics are different from the metrics emitted by Promtail. If you rely on Promtail's metrics, you must change your configuration, for example, your alerts and dashboards. +(Warning) The converter does not support converting the provided server.log_level config: The equivalent feature in Alloy is to use the logging config block to set the level argument. (Error) server.http_path_prefix is not supported -(Warning) server.health_check_target disabling is not supported in Flow mode \ No newline at end of file +(Warning) server.health_check_target disabling is not supported in Alloy diff --git a/internal/converter/internal/promtailconvert/validate.go b/internal/converter/internal/promtailconvert/validate.go index 8833b36c57..fdb7454825 100644 --- a/internal/converter/internal/promtailconvert/validate.go +++ b/internal/converter/internal/promtailconvert/validate.go @@ -8,11 +8,11 @@ import ( // validateTopLevelConfig validates the top-level config for any unsupported features. There may still be some // other unsupported features in scope of each config block, which are raised by their respective conversion code. func validateTopLevelConfig(cfg *promtailcfg.Config, diags *diag.Diagnostics) { - // The positions global config is not supported in Flow Mode. + // The positions global config is not supported in Alloy. 
if cfg.PositionsConfig != DefaultPositionsConfig() { diags.Add( diag.SeverityLevelInfo, - "global positions configuration is not supported - each Flow Mode's loki.source.file component "+ + "global positions configuration is not supported in Alloy - each loki.source.file component "+ "has its own positions file in the component's data directory", ) } @@ -21,7 +21,7 @@ func validateTopLevelConfig(cfg *promtailcfg.Config, diags *diag.Diagnostics) { if cfg.WAL.Enabled { diags.Add( diag.SeverityLevelError, - "Promtail's WAL is currently not supported in Flow Mode", + "Promtail's WAL is currently not supported in Alloy", ) } @@ -31,26 +31,26 @@ func validateTopLevelConfig(cfg *promtailcfg.Config, diags *diag.Diagnostics) { if cfg.LimitsConfig != DefaultLimitsConfig() { diags.Add( diag.SeverityLevelError, - "limits_config is not yet supported in Flow Mode", + "limits_config is not yet supported in Alloy", ) } - // We cannot migrate the tracing config to Flow Mode, since in promtail it relies on + // We cannot migrate the tracing config to Alloy, since in promtail it relies on // environment variables that can be set or not and depending on what is set, different // features of tracing are configured. We'd need to have conditionals in the - // flow config to translate this. See https://www.jaegertracing.io/docs/1.16/client-features/ + // Alloy config to translate this. See https://www.jaegertracing.io/docs/1.16/client-features/ if cfg.Tracing.Enabled { diags.Add( diag.SeverityLevelWarn, - "If you have a tracing set up for Promtail, it cannot be migrated to Flow Mode automatically. "+ - "Refer to the documentation on how to configure tracing in Flow Mode.", + "If you have a tracing set up for Promtail, it cannot be migrated to Alloy automatically. "+ + "Refer to the documentation on how to configure tracing in Alloy.", ) } if cfg.TargetConfig.Stdin { diags.Add( diag.SeverityLevelError, - "reading targets from stdin is not supported in Flow Mode configuration file", + "reading targets from stdin is not supported in Alloy configuration file", ) } if cfg.ServerConfig.ProfilingEnabled { @@ -61,14 +61,14 @@ func validateTopLevelConfig(cfg *promtailcfg.Config, diags *diag.Diagnostics) { if cfg.ServerConfig.RegisterInstrumentation { diags.Add( diag.SeverityLevelWarn, - "The Agent's Flow Mode metrics are different from the metrics emitted by Promtail. If you "+ + "Alloy's metrics are different from the metrics emitted by Promtail. 
If you "+ "rely on Promtail's metrics, you must change your configuration, for example, your alerts and dashboards.", ) } if cfg.ServerConfig.LogLevel.String() != "info" { diags.Add(diag.SeverityLevelWarn, "The converter does not support converting the provided server.log_level config: "+ - "The equivalent feature in Flow mode is to use the logging config block to set the level argument.") + "The equivalent feature in Alloy is to use the logging config block to set the level argument.") } if cfg.ServerConfig.PathPrefix != "" { @@ -76,6 +76,6 @@ func validateTopLevelConfig(cfg *promtailcfg.Config, diags *diag.Diagnostics) { } if cfg.ServerConfig.HealthCheckTarget != nil && !*cfg.ServerConfig.HealthCheckTarget { - diags.Add(diag.SeverityLevelWarn, "server.health_check_target disabling is not supported in Flow mode") + diags.Add(diag.SeverityLevelWarn, "server.health_check_target disabling is not supported in Alloy") } } diff --git a/internal/converter/internal/staticconvert/internal/build/builder_traces.go b/internal/converter/internal/staticconvert/internal/build/builder_traces.go index 8573ddeae4..330a486759 100644 --- a/internal/converter/internal/staticconvert/internal/build/builder_traces.go +++ b/internal/converter/internal/staticconvert/internal/build/builder_traces.go @@ -44,10 +44,10 @@ func (b *ConfigBuilder) translateAutomaticLogging(otelCfg *otelcol.Config, cfg t } if cfg.AutomaticLogging.Backend == "stdout" { - b.diags.Add(diag.SeverityLevelWarn, "automatic_logging for traces has no direct flow equivalent. "+ + b.diags.Add(diag.SeverityLevelWarn, "automatic_logging for traces has no direct Alloy equivalent. "+ "A best effort translation has been made to otelcol.exporter.logging but the behavior will differ.") } else { - b.diags.Add(diag.SeverityLevelError, "automatic_logging for traces has no direct flow equivalent. "+ + b.diags.Add(diag.SeverityLevelError, "automatic_logging for traces has no direct Alloy equivalent. 
"+ "A best effort translation can be made which only outputs to stdout and not directly to loki by bypassing errors.") } diff --git a/internal/converter/internal/staticconvert/internal/build/eventhandler.go b/internal/converter/internal/staticconvert/internal/build/eventhandler.go index e0485eb9c0..ae057e2cad 100644 --- a/internal/converter/internal/staticconvert/internal/build/eventhandler.go +++ b/internal/converter/internal/staticconvert/internal/build/eventhandler.go @@ -4,7 +4,7 @@ import ( "fmt" "github.com/grafana/alloy/internal/component/common/loki" - flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + alloy_relabel "github.com/grafana/alloy/internal/component/common/relabel" "github.com/grafana/alloy/internal/component/loki/relabel" "github.com/grafana/alloy/internal/component/loki/source/kubernetes_events" "github.com/grafana/alloy/internal/converter/diag" @@ -19,14 +19,14 @@ func (b *ConfigBuilder) appendEventHandlerV2(config *eventhandler_v2.Config) { b.diags.Add(diag.SeverityLevelCritical, fmt.Sprintf("failed to sanitize job name: %s", err)) } - b.diags.AddAll(common.ValidateSupported(common.NotDeepEquals, config.SendTimeout, eventhandler_v2.DefaultConfig.SendTimeout, "eventhandler send_timeout", "this field is not configurable in flow mode")) - b.diags.AddAll(common.ValidateSupported(common.NotDeepEquals, config.InformerResync, eventhandler_v2.DefaultConfig.InformerResync, "eventhandler informer_resync", "this field is not configurable in flow mode")) - b.diags.AddAll(common.ValidateSupported(common.NotDeepEquals, config.FlushInterval, eventhandler_v2.DefaultConfig.FlushInterval, "eventhandler flush_interval", "this field is not configurable in flow mode")) + b.diags.AddAll(common.ValidateSupported(common.NotDeepEquals, config.SendTimeout, eventhandler_v2.DefaultConfig.SendTimeout, "eventhandler send_timeout", "this field is not configurable in Alloy")) + b.diags.AddAll(common.ValidateSupported(common.NotDeepEquals, config.InformerResync, eventhandler_v2.DefaultConfig.InformerResync, "eventhandler informer_resync", "this field is not configurable in Alloy")) + b.diags.AddAll(common.ValidateSupported(common.NotDeepEquals, config.FlushInterval, eventhandler_v2.DefaultConfig.FlushInterval, "eventhandler flush_interval", "this field is not configurable in Alloy")) if config.CachePath != eventhandler_v2.DefaultConfig.CachePath { b.diags.Add( diag.SeverityLevelWarn, - "The eventhandler cache_path is unnecessary in flow mode because the storage path is governed by the --storage.path cmd argument and is always local to the component.", + "The eventhandler cache_path is unnecessary in Alloy because the storage path is governed by the --storage.path cmd argument and is always local to the component.", ) } @@ -45,9 +45,9 @@ func (b *ConfigBuilder) appendEventHandlerV2(config *eventhandler_v2.Config) { } func (b *ConfigBuilder) injectExtraLabels(config *eventhandler_v2.Config, receiver common.ConvertLogsReceiver, compLabel string) common.ConvertLogsReceiver { - var relabelConfigs []*flow_relabel.Config + var relabelConfigs []*alloy_relabel.Config for _, extraLabel := range config.ExtraLabels { - defaultConfig := flow_relabel.DefaultRelabelConfig + defaultConfig := alloy_relabel.DefaultRelabelConfig relabelConfig := &defaultConfig relabelConfig.SourceLabels = []string{"__address__"} relabelConfig.TargetLabel = extraLabel.Name diff --git a/internal/converter/internal/staticconvert/staticconvert.go b/internal/converter/internal/staticconvert/staticconvert.go index 
844537200b..c8c6b342d9 100644 --- a/internal/converter/internal/staticconvert/staticconvert.go +++ b/internal/converter/internal/staticconvert/staticconvert.go @@ -48,7 +48,7 @@ func Convert(in []byte, extraArgs []string) ([]byte, diag.Diagnostics) { var buf bytes.Buffer if _, err := f.WriteTo(&buf); err != nil { - diags.Add(diag.SeverityLevelCritical, fmt.Sprintf("failed to render Flow config: %s", err.Error())) + diags.Add(diag.SeverityLevelCritical, fmt.Sprintf("failed to render Alloy config: %s", err.Error())) return nil, diags } @@ -61,10 +61,10 @@ func Convert(in []byte, extraArgs []string) ([]byte, diag.Diagnostics) { return prettyByte, diags } -// AppendAll analyzes the entire static config in memory and transforms it -// into Flow Arguments. It then appends each argument to the file builder. -// Exports from other components are correctly referenced to build the Flow -// pipeline. +// AppendAll analyzes the entire static config in memory and transforms it into +// Alloy component Arguments. It then appends each argument to the file +// builder. Exports from other components are correctly referenced to build the +// Alloy pipeline. func AppendAll(f *builder.File, staticConfig *config.Config) diag.Diagnostics { var diags diag.Diagnostics diff --git a/internal/converter/internal/staticconvert/testdata-v2/integrations_v2.diags b/internal/converter/internal/staticconvert/testdata-v2/integrations_v2.diags index a4a05d1a3b..8cd9e30103 100644 --- a/internal/converter/internal/staticconvert/testdata-v2/integrations_v2.diags +++ b/internal/converter/internal/staticconvert/testdata-v2/integrations_v2.diags @@ -1 +1 @@ -(Warning) Please review your agent command line flags and ensure they are set in your Flow mode config file where necessary. \ No newline at end of file +(Warning) Please review your agent command line flags and ensure they are set in your Alloy config file where necessary. diff --git a/internal/converter/internal/staticconvert/testdata-v2/missing_metrics_config.diags b/internal/converter/internal/staticconvert/testdata-v2/missing_metrics_config.diags index 3f9c877ad8..f4d25ac351 100644 --- a/internal/converter/internal/staticconvert/testdata-v2/missing_metrics_config.diags +++ b/internal/converter/internal/staticconvert/testdata-v2/missing_metrics_config.diags @@ -1,2 +1,2 @@ (Critical) integration agent is looking for an undefined metrics config: not_default -(Warning) Please review your agent command line flags and ensure they are set in your Flow mode config file where necessary. \ No newline at end of file +(Warning) Please review your agent command line flags and ensure they are set in your Alloy config file where necessary. 
diff --git a/internal/converter/internal/staticconvert/testdata-v2/unsupported.diags b/internal/converter/internal/staticconvert/testdata-v2/unsupported.diags index 85fb306a8b..775df06fc7 100644 --- a/internal/converter/internal/staticconvert/testdata-v2/unsupported.diags +++ b/internal/converter/internal/staticconvert/testdata-v2/unsupported.diags @@ -1,7 +1,7 @@ -(Error) The converter does not support converting the provided eventhandler send_timeout config: this field is not configurable in flow mode -(Error) The converter does not support converting the provided eventhandler informer_resync config: this field is not configurable in flow mode -(Error) The converter does not support converting the provided eventhandler flush_interval config: this field is not configurable in flow mode -(Warning) The eventhandler cache_path is unnecessary in flow mode because the storage path is governed by the --storage.path cmd argument and is always local to the component. -(Warning) Please review your agent command line flags and ensure they are set in your Flow mode config file where necessary. +(Error) The converter does not support converting the provided eventhandler send_timeout config: this field is not configurable in Alloy +(Error) The converter does not support converting the provided eventhandler informer_resync config: this field is not configurable in Alloy +(Error) The converter does not support converting the provided eventhandler flush_interval config: this field is not configurable in Alloy +(Warning) The eventhandler cache_path is unnecessary in Alloy because the storage path is governed by the --storage.path cmd argument and is always local to the component. +(Warning) Please review your agent command line flags and ensure they are set in your Alloy config file where necessary. (Error) The converter does not support converting the provided app_agent_receiver traces_instance config. -(Error) Support for the vsphere integration has been removed in Grafana Alloy v1.0, and conversion will not be performed.\nTo achieve similar functionality, consider creating an otelcol.receiver.vcenter component and converting generated metrics to a Prometheus pipeline using otelcol.exporter.prometheus. +(Error) Support for the vsphere integration has been removed in Alloy v1.0, and conversion will not be performed.\nTo achieve similar functionality, consider creating an otelcol.receiver.vcenter component and converting generated metrics to a Prometheus pipeline using otelcol.exporter.prometheus. diff --git a/internal/converter/internal/staticconvert/testdata-v2_windows/integrations_v2.diags b/internal/converter/internal/staticconvert/testdata-v2_windows/integrations_v2.diags index a4a05d1a3b..514c64bf15 100644 --- a/internal/converter/internal/staticconvert/testdata-v2_windows/integrations_v2.diags +++ b/internal/converter/internal/staticconvert/testdata-v2_windows/integrations_v2.diags @@ -1 +1 @@ -(Warning) Please review your agent command line flags and ensure they are set in your Flow mode config file where necessary. \ No newline at end of file +(Warning) Please review your agent command line flags and ensure they are set in your Alloy config file where necessary. 
diff --git a/internal/converter/internal/staticconvert/testdata/dup_labels.diags b/internal/converter/internal/staticconvert/testdata/dup_labels.diags index 0f3041bb7f..b219f8676c 100644 --- a/internal/converter/internal/staticconvert/testdata/dup_labels.diags +++ b/internal/converter/internal/staticconvert/testdata/dup_labels.diags @@ -1,3 +1,3 @@ -(Warning) Please review your agent command line flags and ensure they are set in your Flow mode config file where necessary. -(Critical) duplicate label after conversion "discovery.consul.metrics_name_job_test". this is due to how valid flow labels are assembled and can be avoided by updating named properties in the source config. -(Critical) duplicate label after conversion "prometheus.scrape.metrics_name_job_test". this is due to how valid flow labels are assembled and can be avoided by updating named properties in the source config. \ No newline at end of file +(Warning) Please review your agent command line flags and ensure they are set in your Alloy config file where necessary. +(Critical) duplicate label after conversion "discovery.consul.metrics_name_job_test". this is due to how valid Alloy labels are assembled and can be avoided by updating named properties in the source config. +(Critical) duplicate label after conversion "prometheus.scrape.metrics_name_job_test". this is due to how valid Alloy labels are assembled and can be avoided by updating named properties in the source config. diff --git a/internal/converter/internal/staticconvert/testdata/integrations.diags b/internal/converter/internal/staticconvert/testdata/integrations.diags index a4a05d1a3b..8cd9e30103 100644 --- a/internal/converter/internal/staticconvert/testdata/integrations.diags +++ b/internal/converter/internal/staticconvert/testdata/integrations.diags @@ -1 +1 @@ -(Warning) Please review your agent command line flags and ensure they are set in your Flow mode config file where necessary. \ No newline at end of file +(Warning) Please review your agent command line flags and ensure they are set in your Alloy config file where necessary. diff --git a/internal/converter/internal/staticconvert/testdata/integrations_no_rw.diags b/internal/converter/internal/staticconvert/testdata/integrations_no_rw.diags index 1f0d463ede..893b1f6ac1 100644 --- a/internal/converter/internal/staticconvert/testdata/integrations_no_rw.diags +++ b/internal/converter/internal/staticconvert/testdata/integrations_no_rw.diags @@ -1,2 +1,2 @@ (Error) The converter does not support handling integrations which are not connected to a remote_write. -(Warning) Please review your agent command line flags and ensure they are set in your Flow mode config file where necessary. \ No newline at end of file +(Warning) Please review your agent command line flags and ensure they are set in your Alloy config file where necessary. diff --git a/internal/converter/internal/staticconvert/testdata/prom_remote_write.diags b/internal/converter/internal/staticconvert/testdata/prom_remote_write.diags index a4a05d1a3b..8cd9e30103 100644 --- a/internal/converter/internal/staticconvert/testdata/prom_remote_write.diags +++ b/internal/converter/internal/staticconvert/testdata/prom_remote_write.diags @@ -1 +1 @@ -(Warning) Please review your agent command line flags and ensure they are set in your Flow mode config file where necessary. \ No newline at end of file +(Warning) Please review your agent command line flags and ensure they are set in your Alloy config file where necessary. 
diff --git a/internal/converter/internal/staticconvert/testdata/prom_scrape.diags b/internal/converter/internal/staticconvert/testdata/prom_scrape.diags index 8cd4f240ac..c102bd986c 100644 --- a/internal/converter/internal/staticconvert/testdata/prom_scrape.diags +++ b/internal/converter/internal/staticconvert/testdata/prom_scrape.diags @@ -1,3 +1,3 @@ (Error) The converter does not support converting the provided global evaluation_interval config. -(Warning) Please review your agent command line flags and ensure they are set in your Flow mode config file where necessary. -(Warning) The converter does not support converting the provided metrics wal_directory config: Use the run command flag --storage.path for Flow mode instead. \ No newline at end of file +(Warning) Please review your agent command line flags and ensure they are set in your Alloy config file where necessary. +(Warning) The converter does not support converting the provided metrics wal_directory config: Use the run command flag --storage.path instead. diff --git a/internal/converter/internal/staticconvert/testdata/promtail_prom.diags b/internal/converter/internal/staticconvert/testdata/promtail_prom.diags index a4a05d1a3b..8cd9e30103 100644 --- a/internal/converter/internal/staticconvert/testdata/promtail_prom.diags +++ b/internal/converter/internal/staticconvert/testdata/promtail_prom.diags @@ -1 +1 @@ -(Warning) Please review your agent command line flags and ensure they are set in your Flow mode config file where necessary. \ No newline at end of file +(Warning) Please review your agent command line flags and ensure they are set in your Alloy config file where necessary. diff --git a/internal/converter/internal/staticconvert/testdata/promtail_scrape.diags b/internal/converter/internal/staticconvert/testdata/promtail_scrape.diags index a4a05d1a3b..8cd9e30103 100644 --- a/internal/converter/internal/staticconvert/testdata/promtail_scrape.diags +++ b/internal/converter/internal/staticconvert/testdata/promtail_scrape.diags @@ -1 +1 @@ -(Warning) Please review your agent command line flags and ensure they are set in your Flow mode config file where necessary. \ No newline at end of file +(Warning) Please review your agent command line flags and ensure they are set in your Alloy config file where necessary. diff --git a/internal/converter/internal/staticconvert/testdata/sanitize.diags b/internal/converter/internal/staticconvert/testdata/sanitize.diags index d02def1345..82be55e850 100644 --- a/internal/converter/internal/staticconvert/testdata/sanitize.diags +++ b/internal/converter/internal/staticconvert/testdata/sanitize.diags @@ -1,2 +1,2 @@ -(Warning) Please review your agent command line flags and ensure they are set in your Flow mode config file where necessary. -(Warning) The converter does not support converting the provided metrics wal_directory config: Use the run command flag --storage.path for Flow mode instead. \ No newline at end of file +(Warning) Please review your agent command line flags and ensure they are set in your Alloy config file where necessary. +(Warning) The converter does not support converting the provided metrics wal_directory config: Use the run command flag --storage.path instead. 
diff --git a/internal/converter/internal/staticconvert/testdata/traces.diags b/internal/converter/internal/staticconvert/testdata/traces.diags index 4f7c851a10..e40ce9c03d 100644 --- a/internal/converter/internal/staticconvert/testdata/traces.diags +++ b/internal/converter/internal/staticconvert/testdata/traces.diags @@ -1,2 +1,2 @@ -(Warning) automatic_logging for traces has no direct flow equivalent. A best effort translation has been made to otelcol.exporter.logging but the behavior will differ. -(Warning) Please review your agent command line flags and ensure they are set in your Flow mode config file where necessary. \ No newline at end of file +(Warning) automatic_logging for traces has no direct Alloy equivalent. A best effort translation has been made to otelcol.exporter.logging but the behavior will differ. +(Warning) Please review your agent command line flags and ensure they are set in your Alloy config file where necessary. diff --git a/internal/converter/internal/staticconvert/testdata/traces_multi.diags b/internal/converter/internal/staticconvert/testdata/traces_multi.diags index a4a05d1a3b..8cd9e30103 100644 --- a/internal/converter/internal/staticconvert/testdata/traces_multi.diags +++ b/internal/converter/internal/staticconvert/testdata/traces_multi.diags @@ -1 +1 @@ -(Warning) Please review your agent command line flags and ensure they are set in your Flow mode config file where necessary. \ No newline at end of file +(Warning) Please review your agent command line flags and ensure they are set in your Alloy config file where necessary. diff --git a/internal/converter/internal/staticconvert/testdata/unsupported.diags b/internal/converter/internal/staticconvert/testdata/unsupported.diags index 0958f0e79e..b3ac16588a 100644 --- a/internal/converter/internal/staticconvert/testdata/unsupported.diags +++ b/internal/converter/internal/staticconvert/testdata/unsupported.diags @@ -1,9 +1,9 @@ (Error) The converter does not support handling integrations which are not being scraped: mssql. (Error) mapping_config is not supported in statsd_exporter integrations config -(Error) automatic_logging for traces has no direct flow equivalent. A best effort translation can be made which only outputs to stdout and not directly to loki by bypassing errors. -(Warning) Please review your agent command line flags and ensure they are set in your Flow mode config file where necessary. -(Error) The converter does not support converting the provided grpc_tls_config server config: flow mode does not have a gRPC server to configure. +(Error) automatic_logging for traces has no direct Alloy equivalent. A best effort translation can be made which only outputs to stdout and not directly to loki by bypassing errors. +(Warning) Please review your agent command line flags and ensure they are set in your Alloy config file where necessary. +(Error) The converter does not support converting the provided grpc_tls_config server config: Alloy does not have a gRPC server to configure. (Error) The converter does not support converting the provided prefer_server_cipher_suites server config. -(Warning) The converter does not support converting the provided metrics wal_directory config: Use the run command flag --storage.path for Flow mode instead. +(Warning) The converter does not support converting the provided metrics wal_directory config: Use the run command flag --storage.path instead. (Warning) disabled integrations do nothing and are not included in the output: node_exporter. 
-(Error) The converter does not support converting the provided agent_management config. \ No newline at end of file +(Error) The converter does not support converting the provided agent_management config. diff --git a/internal/converter/internal/staticconvert/testdata_windows/integrations.diags b/internal/converter/internal/staticconvert/testdata_windows/integrations.diags index a4a05d1a3b..8cd9e30103 100644 --- a/internal/converter/internal/staticconvert/testdata_windows/integrations.diags +++ b/internal/converter/internal/staticconvert/testdata_windows/integrations.diags @@ -1 +1 @@ -(Warning) Please review your agent command line flags and ensure they are set in your Flow mode config file where necessary. \ No newline at end of file +(Warning) Please review your agent command line flags and ensure they are set in your Alloy config file where necessary. diff --git a/internal/converter/internal/staticconvert/validate.go b/internal/converter/internal/staticconvert/validate.go index 36e2937528..9b7e4a8a82 100644 --- a/internal/converter/internal/staticconvert/validate.go +++ b/internal/converter/internal/staticconvert/validate.go @@ -69,7 +69,7 @@ func validate(staticConfig *config.Config) diag.Diagnostics { func validateCommandLine() diag.Diagnostics { var diags diag.Diagnostics - diags.Add(diag.SeverityLevelWarn, "Please review your agent command line flags and ensure they are set in your Flow mode config file where necessary.") + diags.Add(diag.SeverityLevelWarn, "Please review your agent command line flags and ensure they are set in your Alloy config file where necessary.") return diags } @@ -78,7 +78,7 @@ func validateServer(serverConfig *server.Config) diag.Diagnostics { var diags diag.Diagnostics defaultServerConfig := server.DefaultConfig() - diags.AddAll(common.ValidateSupported(common.NotDeepEquals, serverConfig.GRPC, defaultServerConfig.GRPC, "grpc_tls_config server", "flow mode does not have a gRPC server to configure.")) + diags.AddAll(common.ValidateSupported(common.NotDeepEquals, serverConfig.GRPC, defaultServerConfig.GRPC, "grpc_tls_config server", "Alloy does not have a gRPC server to configure.")) diags.AddAll(common.ValidateSupported(common.NotEquals, serverConfig.HTTP.TLSConfig.PreferServerCipherSuites, defaultServerConfig.HTTP.TLSConfig.PreferServerCipherSuites, "prefer_server_cipher_suites server", "")) return diags @@ -101,7 +101,7 @@ func validateMetrics(metricsConfig metrics.Config, grpcListenPort int) diag.Diag diags.AddAll(common.ValidateSupported(common.NotEquals, metricsConfig.IdleConnTimeout, defaultMetrics.IdleConnTimeout, "http_idle_conn_timeout metrics", "")) if metricsConfig.WALDir != defaultMetrics.WALDir { - diags.Add(diag.SeverityLevelWarn, "The converter does not support converting the provided metrics wal_directory config: Use the run command flag --storage.path for Flow mode instead.") + diags.Add(diag.SeverityLevelWarn, "The converter does not support converting the provided metrics wal_directory config: Use the run command flag --storage.path instead.") } return diags @@ -177,7 +177,7 @@ func validateIntegrationsV2(integrationsConfig *v2.SubsystemOptions) diag.Diagno case *vmware_exporter_v2.Config: diags.AddWithDetail( diag.SeverityLevelError, - "Support for the vsphere integration has been removed in Grafana Alloy v1.0, and conversion will not be performed.", + "Support for the vsphere integration has been removed in Alloy v1.0, and conversion will not be performed.", "To achieve similar functionality, consider creating an 
otelcol.receiver.vcenter component and converting generated metrics to a Prometheus pipeline using otelcol.exporter.prometheus.", ) case *metricsutils_v2.ConfigShim: diff --git a/internal/converter/internal/test_common/testing.go b/internal/converter/internal/test_common/testing.go index c77e9edae1..794b0dc9b3 100644 --- a/internal/converter/internal/test_common/testing.go +++ b/internal/converter/internal/test_common/testing.go @@ -25,12 +25,12 @@ import ( ) const ( - flowSuffix = ".river" + alloySuffix = ".river" diagsSuffix = ".diags" ) // TestDirectory will execute tests for converting from a source configuration -// file to a flow configuration file for all files in a provided folder path. +// file to an Alloy configuration file for all files in a provided folder path. // // For each file in the folderPath which ends with the sourceSuffix: // @@ -42,7 +42,7 @@ const ( // 4. If the current filename.sourceSuffix has a matching filename.river, read // the contents of filename.river and validate that they match the river // configuration generated by calling convert in step 1. -func TestDirectory(t *testing.T, folderPath string, sourceSuffix string, loadFlowConfig bool, extraArgs []string, convert func(in []byte, extraArgs []string) ([]byte, diag.Diagnostics)) { +func TestDirectory(t *testing.T, folderPath string, sourceSuffix string, loadAlloyConfig bool, extraArgs []string, convert func(in []byte, extraArgs []string) ([]byte, diag.Diagnostics)) { require.NoError(t, filepath.WalkDir(folderPath, func(path string, d fs.DirEntry, _ error) error { if d.IsDir() { return nil @@ -51,7 +51,7 @@ func TestDirectory(t *testing.T, folderPath string, sourceSuffix string, loadFlo if strings.HasSuffix(path, sourceSuffix) { tc := filepath.Base(path) t.Run(tc, func(t *testing.T) { - riverFile := strings.TrimSuffix(path, sourceSuffix) + flowSuffix + riverFile := strings.TrimSuffix(path, sourceSuffix) + alloySuffix diagsFile := strings.TrimSuffix(path, sourceSuffix) + diagsSuffix if !fileExists(riverFile) && !fileExists(diagsFile) { t.Fatalf("no expected diags or river for %s - missing test expectations?", path) @@ -67,7 +67,7 @@ func TestDirectory(t *testing.T, folderPath string, sourceSuffix string, loadFlo validateDiags(t, expectedDiags, actualDiags) expectedRiver := getExpectedRiver(t, riverFile) - validateRiver(t, expectedRiver, actualRiver, loadFlowConfig) + validateRiver(t, expectedRiver, actualRiver, loadAlloyConfig) }) } @@ -155,7 +155,7 @@ func fileExists(path string) bool { } // validateRiver makes sure the expected river and actual river are a match -func validateRiver(t *testing.T, expectedRiver []byte, actualRiver []byte, loadFlowConfig bool) { +func validateRiver(t *testing.T, expectedRiver []byte, actualRiver []byte, loadAlloyConfig bool) { if len(expectedRiver) > 0 { if !reflect.DeepEqual(expectedRiver, actualRiver) { fmt.Println("============== ACTUAL =============") @@ -165,14 +165,14 @@ func validateRiver(t *testing.T, expectedRiver []byte, actualRiver []byte, loadF require.Equal(t, string(expectedRiver), string(normalizeLineEndings(actualRiver))) - if loadFlowConfig { - attemptLoadingFlowConfig(t, actualRiver) + if loadAlloyConfig { + attemptLoadingAlloyConfig(t, actualRiver) } } } -// attemptLoadingFlowConfig will attempt to load the Flow config and report any errors. -func attemptLoadingFlowConfig(t *testing.T, river []byte) { +// attemptLoadingAlloyConfig will attempt to load the Alloy config and report any errors. 
+func attemptLoadingAlloyConfig(t *testing.T, river []byte) { cfg, err := alloy.ParseSource(t.Name(), river) require.NoError(t, err, "the output River config failed to parse: %s", string(normalizeLineEndings(river))) diff --git a/internal/featuregate/featuregate.go b/internal/featuregate/featuregate.go index bbad474c10..53514ee003 100644 --- a/internal/featuregate/featuregate.go +++ b/internal/featuregate/featuregate.go @@ -1,5 +1,5 @@ // Package featuregate provides a way to gate features in the collector based on different options, such as the -// feature's stability level and user-defined minimum allowed stability level. This package is used by Flow Mode only. +// feature's stability level and user-defined minimum allowed stability level. package featuregate import ( diff --git a/internal/service/cluster/cluster.go b/internal/service/cluster/cluster.go index 763a5db853..b37c49c405 100644 --- a/internal/service/cluster/cluster.go +++ b/internal/service/cluster/cluster.go @@ -1,5 +1,5 @@ -// Package cluster implements the cluster service for Flow, where multiple -// instances of Flow connect to each other for work distribution. +// Package cluster implements the cluster service, where multiple instances of +// Alloy connect to each other for work distribution. package cluster import ( @@ -55,7 +55,7 @@ type Options struct { Tracer trace.TracerProvider // Where to send traces. // EnableClustering toggles clustering as a whole. When EnableClustering is - // false, the instance of Flow acts as a single-node cluster and it is not + // false, the instance of Alloy acts as a single-node cluster and it is not // possible for other nodes to join the cluster. EnableClustering bool @@ -347,7 +347,7 @@ func (s *Service) Data() any { return &sharderCluster{sharder: s.sharder} } -// Component is a Flow component which subscribes to clustering updates. +// Component is a component which subscribes to clustering updates. type Component interface { component.Component diff --git a/internal/service/http/http.go b/internal/service/http/http.go index a84c1e2506..eaf6bfc2ae 100644 --- a/internal/service/http/http.go +++ b/internal/service/http/http.go @@ -1,4 +1,4 @@ -// Package http implements the HTTP service for Flow. +// Package http implements the HTTP service. package http import ( @@ -384,7 +384,7 @@ func (d Data) HTTPPathForComponent(componentID string) string { return merged } -// Component is a Flow component which also contains a custom HTTP handler. +// Component is a component which also contains a custom HTTP handler. 
type Component interface { component.Component diff --git a/internal/service/labelstore/service.go b/internal/service/labelstore/service.go index c80268b935..6d626ca589 100644 --- a/internal/service/labelstore/service.go +++ b/internal/service/labelstore/service.go @@ -8,8 +8,7 @@ import ( "github.com/go-kit/log" "github.com/grafana/alloy/internal/alloy/logging/level" "github.com/grafana/alloy/internal/featuregate" - agent_service "github.com/grafana/alloy/internal/service" - flow_service "github.com/grafana/alloy/internal/service" + alloy_service "github.com/grafana/alloy/internal/service" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/value" @@ -36,7 +35,7 @@ type staleMarker struct { type Arguments struct{} -var _ flow_service.Service = (*service)(nil) +var _ alloy_service.Service = (*service)(nil) func New(l log.Logger, r prometheus.Registerer) *service { if l == nil { @@ -63,8 +62,8 @@ func New(l log.Logger, r prometheus.Registerer) *service { // Definition returns the Definition of the Service. // Definition must always return the same value across all // calls. -func (s *service) Definition() agent_service.Definition { - return agent_service.Definition{ +func (s *service) Definition() alloy_service.Definition { + return alloy_service.Definition{ Name: ServiceName, ConfigType: Arguments{}, DependsOn: nil, @@ -90,7 +89,7 @@ func (s *service) Collect(m chan<- prometheus.Metric) { // Run starts a Service. Run must block until the provided // context is canceled. Returning an error should be treated // as a fatal error for the Service. -func (s *service) Run(ctx context.Context, host agent_service.Host) error { +func (s *service) Run(ctx context.Context, host alloy_service.Host) error { staleCheck := time.NewTicker(10 * time.Minute) for { select { diff --git a/internal/service/otel/otel.go b/internal/service/otel/otel.go index 5415277c10..18821f9397 100644 --- a/internal/service/otel/otel.go +++ b/internal/service/otel/otel.go @@ -1,4 +1,4 @@ -// Package otel implements the otel service for Flow. +// Package otel implements the otel service. // This service registers feature gates will be used by the otelcol components // based on upstream Collector components. package otel @@ -30,7 +30,7 @@ func New(logger log.Logger) *Service { // an Otel component. If we set the feature gates in Run(), it will // be too late - Otel would have already checked the feature gate by then. // This is because the services are not started prior to the graph evaluation. - err := util.SetupFlowModeOtelFeatureGates() + err := util.SetupOtelFeatureGates() if err != nil { logger.Log("msg", "failed to set up Otel feature gates", "err", err) return nil diff --git a/internal/service/service.go b/internal/service/service.go index 8901947813..4d2fcdafd9 100644 --- a/internal/service/service.go +++ b/internal/service/service.go @@ -1,6 +1,6 @@ -// Package service defines a pluggable service for the Flow system. +// Package service defines a pluggable service. // -// Services are low-level constructs which run for the lifetime of the Flow +// Services are low-level constructs which run for the lifetime of the Alloy // controller, and are given deeper levels of access to the overall system // compared to components, such as the individual instances of running // components. @@ -14,9 +14,9 @@ import ( "github.com/grafana/alloy/internal/featuregate" ) -// Definition describes an individual Flow service. 
Services have unique names -// and optional ConfigTypes where they can be configured within the root Flow -// module. +// Definition describes an individual service. Services have unique names +// and optional ConfigTypes where they can be configured within the main +// configuration. type Definition struct { // Name uniquely defines a service. Name string @@ -34,7 +34,7 @@ type Definition struct { // specific service by name. If DependsOn includes an invalid // reference to a service (either because of a cyclic dependency, // or a named service doesn't exist), it is treated as a fatal - // error and the root Flow module will exit. + // error and Alloy will exit. DependsOn []string // Stability is the overall stability level of the service. This is used to @@ -46,7 +46,7 @@ type Definition struct { Stability featuregate.Stability } -// Host is a controller for services and Flow components. +// Host is a controller for services and components. type Host interface { // GetComponent gets a running component by ID. // @@ -72,7 +72,7 @@ type Host interface { NewController(id string) Controller } -// Controller is implemented by flow.Flow. +// Controller is implemented by alloy.Alloy. type Controller interface { Run(ctx context.Context) LoadSource(source []byte, args map[string]any) error diff --git a/internal/service/ui/ui.go b/internal/service/ui/ui.go index 6ba32ef415..3eb866939e 100644 --- a/internal/service/ui/ui.go +++ b/internal/service/ui/ui.go @@ -75,7 +75,7 @@ func (s *Service) Data() any { func (s *Service) ServiceHandler(host service.Host) (base string, handler http.Handler) { r := mux.NewRouter() - fa := api.NewFlowAPI(host) + fa := api.NewAlloyAPI(host) fa.RegisterRoutes(path.Join(s.opts.UIPrefix, "/api/v0/web"), r) ui.RegisterRoutes(s.opts.UIPrefix, r) diff --git a/internal/static/traces/promsdprocessor/consumer/consumer.go b/internal/static/traces/promsdprocessor/consumer/consumer.go index d3660558ee..e0f416ca7a 100644 --- a/internal/static/traces/promsdprocessor/consumer/consumer.go +++ b/internal/static/traces/promsdprocessor/consumer/consumer.go @@ -97,7 +97,7 @@ func NewConsumer(opts Options, logger log.Logger) (*Consumer, error) { return c, nil } -// UpdateOptions is used in flow mode, where all options need to be updated. +// UpdateOptions is used in Alloy, where all options need to be updated. func (c *Consumer) UpdateOptions(opts Options) error { c.optsMut.Lock() defer c.optsMut.Unlock() diff --git a/internal/useragent/useragent.go b/internal/useragent/useragent.go index 10eb4fce6f..82a6bd831b 100644 --- a/internal/useragent/useragent.go +++ b/internal/useragent/useragent.go @@ -1,6 +1,7 @@ -// package useragent provides a consistent way to get a user agent for outbound http requests from Grafana Agent. -// The default User-Agent is `GrafanaAgent/$VERSION($MODE)` -// Where version is the build version of the agent and MODE is one of "static" or "flow". +// package useragent provides a consistent way to get a user agent for outbound +// http requests from Grafana Alloy. The default User-Agent is `Alloy/VERSION +// (METADATA)`, where VERSION is the build version of Alloy and METADATA +// includes information about how Alloy was deployed. 
package useragent import ( @@ -14,7 +15,6 @@ import ( const ( deployModeEnv = "AGENT_DEPLOY_MODE" - modeEnv = "AGENT_MODE" ) // settable by tests @@ -24,9 +24,6 @@ var executable = os.Executable func Get() string { parenthesis := "" metadata := []string{} - if mode := getRunMode(); mode != "" { - metadata = append(metadata, mode) - } metadata = append(metadata, goos) if op := GetDeployMode(); op != "" { metadata = append(metadata, op) @@ -34,20 +31,7 @@ func Get() string { if len(metadata) > 0 { parenthesis = fmt.Sprintf(" (%s)", strings.Join(metadata, "; ")) } - return fmt.Sprintf("GrafanaAgent/%s%s", build.Version, parenthesis) -} - -// getRunMode attempts to get agent mode, using `unknown` for invalid values. -func getRunMode() string { - key := os.Getenv(modeEnv) - switch key { - case "flow": - return "flow" - case "static", "": - return "static" - default: - return "unknown" - } + return fmt.Sprintf("Alloy/%s%s", build.Version, parenthesis) } // GetDeployMode returns our best-effort guess at the way Grafana Agent was deployed. diff --git a/internal/useragent/useragent_test.go b/internal/useragent/useragent_test.go index 3502ec2137..f950c603be 100644 --- a/internal/useragent/useragent_test.go +++ b/internal/useragent/useragent_test.go @@ -11,76 +11,53 @@ func TestUserAgent(t *testing.T) { build.Version = "v1.2.3" tests := []struct { Name string - Mode string Expected string DeployMode string GOOS string Exe string }{ { - Name: "basic", - Mode: "", - Expected: "GrafanaAgent/v1.2.3 (static; linux; binary)", + Name: "linux", + Expected: "Alloy/v1.2.3 (linux; binary)", GOOS: "linux", }, { - Name: "flow", - Mode: "flow", - Expected: "GrafanaAgent/v1.2.3 (flow; windows; binary)", + Name: "windows", + Expected: "Alloy/v1.2.3 (windows; binary)", GOOS: "windows", }, { - Name: "static", - Mode: "static", - Expected: "GrafanaAgent/v1.2.3 (static; darwin; binary)", + Name: "darwin", + Expected: "Alloy/v1.2.3 (darwin; binary)", GOOS: "darwin", }, - { - Name: "unknown", - Mode: "blahlahblah", - // unknown mode, should not happen. But we will substitute 'unknown' to avoid allowing arbitrary cardinality. 
- Expected: "GrafanaAgent/v1.2.3 (unknown; freebsd; binary)", - GOOS: "freebsd", - }, - { - Name: "operator", - Mode: "static", - DeployMode: "operator", - Expected: "GrafanaAgent/v1.2.3 (static; linux; operator)", - GOOS: "linux", - }, { Name: "deb", - Mode: "flow", DeployMode: "deb", - Expected: "GrafanaAgent/v1.2.3 (flow; linux; deb)", + Expected: "Alloy/v1.2.3 (linux; deb)", GOOS: "linux", }, { Name: "rpm", - Mode: "static", DeployMode: "rpm", - Expected: "GrafanaAgent/v1.2.3 (static; linux; rpm)", + Expected: "Alloy/v1.2.3 (linux; rpm)", GOOS: "linux", }, { Name: "docker", - Mode: "flow", DeployMode: "docker", - Expected: "GrafanaAgent/v1.2.3 (flow; linux; docker)", + Expected: "Alloy/v1.2.3 (linux; docker)", GOOS: "linux", }, { Name: "helm", - Mode: "flow", DeployMode: "helm", - Expected: "GrafanaAgent/v1.2.3 (flow; linux; helm)", + Expected: "Alloy/v1.2.3 (linux; helm)", GOOS: "linux", }, { Name: "brew", - Mode: "flow", - Expected: "GrafanaAgent/v1.2.3 (flow; darwin; brew)", + Expected: "Alloy/v1.2.3 (darwin; brew)", GOOS: "darwin", Exe: "/opt/homebrew/bin/agent", }, @@ -94,7 +71,6 @@ func TestUserAgent(t *testing.T) { } goos = tst.GOOS t.Setenv(deployModeEnv, tst.DeployMode) - t.Setenv(modeEnv, tst.Mode) actual := Get() require.Equal(t, tst.Expected, actual) }) diff --git a/internal/util/otel_feature_gate.go b/internal/util/otel_feature_gate.go index 643f1e4773..44685426a0 100644 --- a/internal/util/otel_feature_gate.go +++ b/internal/util/otel_feature_gate.go @@ -10,19 +10,19 @@ import ( var ( // Enable the "telemetry.useOtelForInternalMetrics" Collector feature gate. // Currently, Collector components uses OpenCensus metrics by default. - // Those metrics cannot be integrated with Agent Flow, - // so we need to always use OpenTelemetry metrics. + // Those metrics cannot be integrated with Alloy, so we need to always use + // OpenTelemetry metrics. // // TODO: Remove "telemetry.useOtelForInternalMetrics" when Collector components // use OpenTelemetry metrics by default. - flowModeOtelFeatureGates = []string{ + otelFeatureGates = []string{ "telemetry.useOtelForInternalMetrics", } ) -// Enables a set of feature gates which should always be enabled for Flow mode. -func SetupFlowModeOtelFeatureGates() error { - return EnableOtelFeatureGates(flowModeOtelFeatureGates...) +// Enables a set of feature gates which should always be enabled in Alloy. +func SetupOtelFeatureGates() error { + return EnableOtelFeatureGates(otelFeatureGates...) } // Enables a set of feature gates in Otel's Global Feature Gate Registry. 
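`SetupOtelFeatureGates` is now a thin wrapper around `EnableOtelFeatureGates`, whose body falls outside this hunk. For readers unfamiliar with the Collector API it builds on, here is a plausible sketch of such a helper against `go.opentelemetry.io/collector/featuregate`; the real implementation may differ in details such as error wrapping:

```go
package util

import (
	"fmt"

	"go.opentelemetry.io/collector/featuregate"
)

// EnableOtelFeatureGates flips the named gates in the Collector's global
// feature gate registry. Set returns an error for gate IDs that were never
// registered, which this sketch simply propagates.
func EnableOtelFeatureGates(fgNames ...string) error {
	reg := featuregate.GlobalRegistry()
	for _, fg := range fgNames {
		if err := reg.Set(fg, true); err != nil {
			return fmt.Errorf("error setting Otel feature gate %q: %w", fg, err)
		}
	}
	return nil
}
```

The accompanying test below walks the same global registry to verify that the gates listed in `otelFeatureGates` end up enabled after `SetupOtelFeatureGates` runs.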
diff --git a/internal/util/otel_feature_gate_test.go b/internal/util/otel_feature_gate_test.go index e3809de8cb..6001adfe8e 100644 --- a/internal/util/otel_feature_gate_test.go +++ b/internal/util/otel_feature_gate_test.go @@ -15,7 +15,7 @@ func Test_FeatureGates(t *testing.T) { fgSet := make(map[string]struct{}) - for _, fg := range flowModeOtelFeatureGates { + for _, fg := range otelFeatureGates { fgSet[fg] = struct{}{} } @@ -31,7 +31,7 @@ func Test_FeatureGates(t *testing.T) { require.Falsef(t, g.IsEnabled(), "feature gate %s is enabled - should it be removed from the Agent?", g.ID()) }) - require.NoError(t, SetupFlowModeOtelFeatureGates()) + require.NoError(t, SetupOtelFeatureGates()) reg.VisitAll(func(g *featuregate.Gate) { if _, ok := fgSet[g.ID()]; !ok { diff --git a/internal/util/test_logger.go b/internal/util/test_logger.go index 76a999f360..8e19b6c66a 100644 --- a/internal/util/test_logger.go +++ b/internal/util/test_logger.go @@ -23,8 +23,8 @@ func TestLogger(t testing.TB) log.Logger { return l } -// TestFlowLogger generates a Flow-compatible logger for a test. -func TestFlowLogger(t require.TestingT) *logging.Logger { +// TestAlloyLogger generates an Alloy-compatible logger for a test. +func TestAlloyLogger(t require.TestingT) *logging.Logger { if t, ok := t.(*testing.T); ok { t.Helper() } diff --git a/internal/web/api/api.go b/internal/web/api/api.go index a6e34dc38e..edc1328857 100644 --- a/internal/web/api/api.go +++ b/internal/web/api/api.go @@ -1,4 +1,4 @@ -// Package api implements the HTTP API used for the Grafana Agent Flow UI. +// Package api implements the HTTP API used for the Grafana Alloy UI. // // The API is internal only; it is not stable and shouldn't be relied on // externally. @@ -16,29 +16,29 @@ import ( "github.com/prometheus/prometheus/util/httputil" ) -// FlowAPI is a wrapper around the component API. -type FlowAPI struct { - flow service.Host +// AlloyAPI is a wrapper around the component API. +type AlloyAPI struct { + alloy service.Host } -// NewFlowAPI instantiates a new Flow API. -func NewFlowAPI(flow service.Host) *FlowAPI { - return &FlowAPI{flow: flow} +// NewAlloyAPI instantiates a new Alloy API. +func NewAlloyAPI(alloy service.Host) *AlloyAPI { + return &AlloyAPI{alloy: alloy} } // RegisterRoutes registers all the API's routes. -func (f *FlowAPI) RegisterRoutes(urlPrefix string, r *mux.Router) { +func (a *AlloyAPI) RegisterRoutes(urlPrefix string, r *mux.Router) { // NOTE(rfratto): {id:.+} is used in routes below to allow the // id to contain / characters, which is used by nested module IDs and // component IDs. 
- r.Handle(path.Join(urlPrefix, "/modules/{moduleID:.+}/components"), httputil.CompressionHandler{Handler: f.listComponentsHandler()}) - r.Handle(path.Join(urlPrefix, "/components"), httputil.CompressionHandler{Handler: f.listComponentsHandler()}) - r.Handle(path.Join(urlPrefix, "/components/{id:.+}"), httputil.CompressionHandler{Handler: f.getComponentHandler()}) - r.Handle(path.Join(urlPrefix, "/peers"), httputil.CompressionHandler{Handler: f.getClusteringPeersHandler()}) + r.Handle(path.Join(urlPrefix, "/modules/{moduleID:.+}/components"), httputil.CompressionHandler{Handler: a.listComponentsHandler()}) + r.Handle(path.Join(urlPrefix, "/components"), httputil.CompressionHandler{Handler: a.listComponentsHandler()}) + r.Handle(path.Join(urlPrefix, "/components/{id:.+}"), httputil.CompressionHandler{Handler: a.getComponentHandler()}) + r.Handle(path.Join(urlPrefix, "/peers"), httputil.CompressionHandler{Handler: a.getClusteringPeersHandler()}) } -func (f *FlowAPI) listComponentsHandler() http.HandlerFunc { +func (a *AlloyAPI) listComponentsHandler() http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { // moduleID is set from the /modules/{moduleID:.+}/components route above // but not from the /components route. @@ -47,7 +47,7 @@ func (f *FlowAPI) listComponentsHandler() http.HandlerFunc { moduleID = vars["moduleID"] } - components, err := f.flow.ListComponents(moduleID, component.InfoOptions{ + components, err := a.alloy.ListComponents(moduleID, component.InfoOptions{ GetHealth: true, }) if err != nil { @@ -64,12 +64,12 @@ func (f *FlowAPI) listComponentsHandler() http.HandlerFunc { } } -func (f *FlowAPI) getComponentHandler() http.HandlerFunc { +func (a *AlloyAPI) getComponentHandler() http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) requestedComponent := component.ParseID(vars["id"]) - component, err := f.flow.GetComponent(requestedComponent, component.InfoOptions{ + component, err := a.alloy.GetComponent(requestedComponent, component.InfoOptions{ GetHealth: true, GetArguments: true, GetExports: true, @@ -89,11 +89,11 @@ func (f *FlowAPI) getComponentHandler() http.HandlerFunc { } } -func (f *FlowAPI) getClusteringPeersHandler() http.HandlerFunc { +func (a *AlloyAPI) getClusteringPeersHandler() http.HandlerFunc { return func(w http.ResponseWriter, _ *http.Request) { // TODO(@tpaschalis) Detect if clustering is disabled and propagate to // the Typescript code (eg. via the returned status code?). - svc, found := f.flow.GetService(cluster.ServiceName) + svc, found := a.alloy.GetService(cluster.ServiceName) if !found { http.Error(w, "cluster service not running", http.StatusInternalServerError) return diff --git a/internal/web/ui/README.md b/internal/web/ui/README.md index b4e7d3e231..696d63928d 100644 --- a/internal/web/ui/README.md +++ b/internal/web/ui/README.md @@ -1,4 +1,4 @@ -# Grafana Agent Flow UI +# Grafana Alloy UI ## Prerequisites diff --git a/internal/web/ui/src/features/component/ComponentView.tsx b/internal/web/ui/src/features/component/ComponentView.tsx index bf97e187fe..8e8306b5ce 100644 --- a/internal/web/ui/src/features/component/ComponentView.tsx +++ b/internal/web/ui/src/features/component/ComponentView.tsx @@ -95,7 +95,7 @@ export const ComponentView: FC = (props) => {
- + Documentation
diff --git a/internal/web/ui/ui.go b/internal/web/ui/ui.go index 3e7ff81ce0..0083395995 100644 --- a/internal/web/ui/ui.go +++ b/internal/web/ui/ui.go @@ -1,4 +1,4 @@ -// Package ui exposes utilities to get a Handler for the Grafana Agent Flow UI. +// Package ui exposes utilities to get a Handler for the Grafana Alloy UI. package ui import ( @@ -18,7 +18,7 @@ import ( ) // RegisterRoutes registers routes to the provided mux.Router for serving the -// Grafana Agent Flow UI. The UI will be served relative to pathPrefix. If no +// Grafana Alloy UI. The UI will be served relative to pathPrefix. If no // pathPrefix is specified, the UI will be served at root. // // By default, the UI is retrieved from the ./internal/web/ui/build directory
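Taken together with the earlier ui service hunk, the renamed API and the UI package are wired onto a single gorilla/mux router. The sketch below is a condensed, self-contained restatement of that wiring; the `webui` package name is hypothetical, and `host` and `uiPrefix` stand in for the `service.Host` and UI prefix the running controller supplies in the real service:

```go
package webui

import (
	"net/http"
	"path"

	"github.com/gorilla/mux"
	"github.com/grafana/alloy/internal/service"
	"github.com/grafana/alloy/internal/web/api"
	"github.com/grafana/alloy/internal/web/ui"
)

// serveUI mounts the Alloy UI and its backing API under uiPrefix, mirroring
// what Service.ServiceHandler does in the ui service hunk above.
func serveUI(host service.Host, uiPrefix string) http.Handler {
	r := mux.NewRouter()

	// REST API consumed by the UI, served under <uiPrefix>/api/v0/web.
	a := api.NewAlloyAPI(host)
	a.RegisterRoutes(path.Join(uiPrefix, "/api/v0/web"), r)

	// Static UI routes, served relative to uiPrefix (root when empty).
	ui.RegisterRoutes(uiPrefix, r)

	return r
}
```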